repo_name | path | copies | size | content | license
---|---|---|---|---|---|
clan2000/data-science-from-scratch | code-python3/linear_algebra.py | 12 | 3566 | # -*- coding: iso-8859-15 -*-
import re, math, random # regexes, math functions, random numbers
import matplotlib.pyplot as plt # pyplot
from collections import defaultdict, Counter
from functools import partial, reduce
#
# functions for working with vectors
#
def vector_add(v, w):
"""adds two vectors componentwise"""
return [v_i + w_i for v_i, w_i in zip(v,w)]
def vector_subtract(v, w):
"""subtracts two vectors componentwise"""
return [v_i - w_i for v_i, w_i in zip(v,w)]
def vector_sum(vectors):
return reduce(vector_add, vectors)
def scalar_multiply(c, v):
return [c * v_i for v_i in v]
def vector_mean(vectors):
"""compute the vector whose i-th element is the mean of the
i-th elements of the input vectors"""
n = len(vectors)
return scalar_multiply(1/n, vector_sum(vectors))
def dot(v, w):
"""v_1 * w_1 + ... + v_n * w_n"""
return sum(v_i * w_i for v_i, w_i in zip(v, w))
def sum_of_squares(v):
"""v_1 * v_1 + ... + v_n * v_n"""
return dot(v, v)
def magnitude(v):
return math.sqrt(sum_of_squares(v))
def squared_distance(v, w):
return sum_of_squares(vector_subtract(v, w))
def distance(v, w):
return math.sqrt(squared_distance(v, w))
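# Illustrative sanity checks (added as a small worked example; the expected
# values are easy to verify by hand):
assert vector_add([1, 2], [3, 4]) == [4, 6]
assert dot([1, 2, 3], [4, 5, 6]) == 32
assert magnitude([3, 4]) == 5.0
assert distance([0, 0], [3, 4]) == 5.0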
#
# functions for working with matrices
#
def shape(A):
num_rows = len(A)
num_cols = len(A[0]) if A else 0
return num_rows, num_cols
def get_row(A, i):
return A[i]
def get_column(A, j):
return [A_i[j] for A_i in A]
def make_matrix(num_rows, num_cols, entry_fn):
"""returns a num_rows x num_cols matrix
whose (i,j)-th entry is entry_fn(i, j)"""
return [[entry_fn(i, j) for j in range(num_cols)]
for i in range(num_rows)]
def is_diagonal(i, j):
"""1's on the 'diagonal', 0's everywhere else"""
return 1 if i == j else 0
identity_matrix = make_matrix(5, 5, is_diagonal)
# user 0 1 2 3 4 5 6 7 8 9
#
friendships = [[0, 1, 1, 0, 0, 0, 0, 0, 0, 0], # user 0
[1, 0, 1, 1, 0, 0, 0, 0, 0, 0], # user 1
[1, 1, 0, 1, 0, 0, 0, 0, 0, 0], # user 2
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0], # user 3
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0], # user 4
[0, 0, 0, 0, 1, 0, 1, 1, 0, 0], # user 5
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 6
[0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # user 7
[0, 0, 0, 0, 0, 0, 1, 1, 0, 1], # user 8
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]] # user 9
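# Illustrative helper (an assumption for demonstration only: `friendships` is a
# symmetric adjacency matrix, so row i lists user i's friends):
def friends_of(user_id):
    """return the ids of the users that user_id is friends with"""
    return [j for j, is_friend in enumerate(get_row(friendships, user_id)) if is_friend]
assert friends_of(0) == [1, 2]
assert friends_of(5) == [4, 6, 7]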
#####
# DELETE DOWN
#
def matrix_add(A, B):
if shape(A) != shape(B):
raise ArithmeticError("cannot add matrices with different shapes")
num_rows, num_cols = shape(A)
def entry_fn(i, j): return A[i][j] + B[i][j]
return make_matrix(num_rows, num_cols, entry_fn)
def make_graph_dot_product_as_vector_projection(plt):
v = [2, 1]
w = [math.sqrt(.25), math.sqrt(.75)]
c = dot(v, w)
vonw = scalar_multiply(c, w)
o = [0,0]
plt.arrow(0, 0, v[0], v[1],
width=0.002, head_width=.1, length_includes_head=True)
plt.annotate("v", v, xytext=[v[0] + 0.1, v[1]])
plt.arrow(0 ,0, w[0], w[1],
width=0.002, head_width=.1, length_includes_head=True)
plt.annotate("w", w, xytext=[w[0] - 0.1, w[1]])
plt.arrow(0, 0, vonw[0], vonw[1], length_includes_head=True)
plt.annotate(u"(v•w)w", vonw, xytext=[vonw[0] - 0.1, vonw[1] + 0.1])
plt.arrow(v[0], v[1], vonw[0] - v[0], vonw[1] - v[1],
linestyle='dotted', length_includes_head=True)
plt.scatter(*zip(v,w,o),marker='.')
plt.axis('equal')
plt.show()
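# Descriptive note: w above has unit length (0.25 + 0.75 = 1), so (v . w) * w is
# the orthogonal projection of v onto w; the dotted arrow drawn last connects v
# to that projection.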
| unlicense |
jgdwyer/ML-convection | sknn_jgd/nn.py | 3 | 26071 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, unicode_literals, print_function)
__all__ = ['Regressor', 'Classifier', 'Layer', 'Convolution']
import os
import sys
import time
import logging
import itertools
import collections
log = logging.getLogger('sknn')
import numpy
import theano
class ansi:
BOLD = '\033[1;97m'
WHITE = '\033[0;97m'
YELLOW = '\033[0;33m'
RED = '\033[0;31m'
GREEN = '\033[0;32m'
BLUE = '\033[0;94m'
ENDC = '\033[0m'
class Layer(object):
"""
Specification for a layer to be passed to the neural network during construction. This
includes a variety of parameters to configure each layer based on its activation type.
Parameters
----------
type: str
Select which activation function this layer should use, as a string. Specifically,
options are ``Rectifier``, ``Sigmoid``, ``Tanh``, and ``ExpLin`` for non-linear layers
and ``Linear`` or ``Softmax`` for output layers.
name: str, optional
You optionally can specify a name for this layer, and its parameters
will then be accessible to scikit-learn via a nested sub-object. For example,
if name is set to ``layer1``, then the parameter ``layer1__units`` from the network
is bound to this layer's ``units`` variable.
The name defaults to ``hiddenN`` where N is the integer index of that layer, and the
final layer is always ``output`` without an index.
units: int
The number of units (also known as neurons) in this layer. This applies to all
layer types except for convolution.
weight_decay: float, optional
The coefficient for L1 or L2 regularization of the weights. For example, a value of
0.0001 is multiplied by the L1 or L2 weight decay equation.
dropout: float, optional
The ratio of inputs to drop out for this layer during training. For example, 0.25
means that 25% of the inputs will be excluded for each training sample, with the
remaining inputs being renormalized accordingly.
normalize: str, optional
Enable normalization of this layer. Can be either `batch` for batch normalization
or (soon) `weights` for weight normalization. Default is no normalization.
frozen: bool, optional
Specify whether to freeze a layer's parameters so they are not adjusted during the
training. This is useful when relying on pre-trained neural networks.
warning: None
You should use keyword arguments after `type` when initializing this object. If not,
the code will raise an AssertionError.
"""
def __init__(
self,
type,
warning=None,
name=None,
units=None,
weight_decay=None,
dropout=None,
normalize=None,
frozen=False):
assert warning is None,\
"Specify layer parameters as keyword arguments, not positional arguments."
if type not in ['Rectifier', 'Sigmoid', 'Tanh', 'Linear', 'Softmax', 'Gaussian', 'ExpLin']:
raise NotImplementedError("Layer type `%s` is not implemented." % type)
self.name = name
self.type = type
self.units = units
self.weight_decay = weight_decay
self.dropout = dropout
self.normalize = normalize
self.frozen = frozen
def set_params(self, **params):
"""Setter for internal variables that's compatible with ``scikit-learn``.
"""
for k, v in params.items():
if k not in self.__dict__:
raise ValueError("Invalid parameter `%s` for layer `%s`." % (k, self.name))
self.__dict__[k] = v
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
copy = self.__dict__.copy()
del copy['type']
params = ", ".join(["%s=%r" % (k, v) for k, v in copy.items() if v is not None])
return "<sknn.nn.%s `%s`: %s>" % (self.__class__.__name__, self.type, params)
class Native(object):
"""Special type of layer that is handled directly to the backend (e.g. Lasagne). This
can be used to construct more advanced networks that are not yet supported by the
default interface.
Note that using this as a layer type means your code may not be compatible with future
revisions or other backends, and that serialization may be affected.
Parameters
----------
constructor: class or callable
The layer type usable directly by the backend (e.g. Lasagne). This can also
be a callable function that acts as a layer constructor.
*args: list of arguments
All positional arguments are passed directly to the constructor when the
neural network is initialized.
**kwargs: dictionary of named arguments
All named arguments are passed to the constructor directly also, with the exception
of the parameters ``name``, ``units``, ``frozen``, ``weight_decay``, ``normalize``
which take on the same role as in :class:`sknn.nn.Layer`.
"""
def __init__(self, constructor, *args, **keywords):
for attr in ['name', 'units', 'frozen', 'weight_decay', 'normalize']:
setattr(self, attr, keywords.pop(attr, None))
self.type = constructor
self.args = args
self.keywords = keywords
class Convolution(Layer):
"""
Specification for a convolution layer to be passed to the neural network in construction.
This includes a variety of convolution-specific parameters to configure each layer, as well
as activation-specific parameters.
Parameters
----------
type: str
Select which activation function this convolution layer should use, as a string.
For hidden layers, you can use the following convolution types ``Rectifier``,
``ExpLin``, ``Sigmoid``, ``Tanh`` or ``Linear``.
name: str, optional
You optionally can specify a name for this layer, and its parameters
will then be accessible to scikit-learn via a nested sub-object. For example,
if name is set to ``layer1``, then the parameter ``layer1__units`` from the network
is bound to this layer's ``units`` variable.
The name defaults to ``hiddenN`` where N is the integer index of that layer, and the
final layer is always ``output`` without an index.
channels: int
Number of output channels for the convolution layers. Each channel has its own
set of shared weights which are trained by applying the kernel over the image.
kernel_shape: tuple of ints
A two-dimensional tuple of integers corresponding to the shape of the kernel when
convolution is used. For example, this could be a square kernel `(3,3)` or a full
horizontal or vertical kernel on the input matrix, e.g. `(N,1)` or `(1,N)`.
kernel_stride: tuple of ints, optional
A two-dimensional tuple of integers that represents the steps taken by the kernel
through the input image. By default, this is set to `(1,1)` and can be
customized separately to pooling.
border_mode: str
String indicating the way borders in the image should be processed, one of the following options:
* `valid` — Only pixels from input where the kernel fits within bounds are processed.
* `full` — All pixels from input are processed, and the boundaries are zero-padded.
* `same` — The output resolution is set to the exact same as the input.
The size of the output depends on this mode: for `same` it is identical to the input,
for `full` it is larger, and for `valid` (default) it will be smaller or equal.
pool_shape: tuple of ints, optional
A two-dimensional tuple of integers corresponding to the pool size for downsampling.
This should be square, for example `(2,2)` to reduce the size by half, or `(4,4)` to make
the output a quarter of the original.
Pooling is applied after the convolution and calculation of its activation.
pool_type: str, optional
Type of the pooling to be used; can be either `max` or `mean`. If a `pool_shape` is
specified the default is to take the maximum value of all inputs that fall into this
pool. Otherwise, the default is None and no pooling is used for performance.
scale_factor: tuple of ints, optional
A two-dimensional tuple of integers corresponding to the upscaling ratio. This should be
square, for example `(2,2)` to increase the size by double, or `(4,4)` to make the
output four times the original.
Upscaling is applied before the convolution and calculation of its activation.
weight_decay: float, optional
The coefficient for L1 or L2 regularization of the weights. For example, a value of
0.0001 is multiplied by the L1 or L2 weight decay equation.
dropout: float, optional
The ratio of inputs to drop out for this layer during training. For example, 0.25
means that 25% of the inputs will be excluded for each training sample, with the
remaining inputs being renormalized accordingly.
normalize: str, optional
Enable normalization of this layer. Can be either `batch` for batch normalization
or (soon) `weights` for weight normalization. Default is no normalization.
frozen: bool, optional
Specify whether to freeze a layer's parameters so they are not adjusted during the
training. This is useful when relying on pre-trained neural networks.
warning: None
You should use keyword arguments after `type` when initializing this object. If not,
the code will raise an AssertionError.
"""
def __init__(
self,
type,
warning=None,
name=None,
channels=None,
kernel_shape=None,
kernel_stride=None,
border_mode='valid',
pool_shape=None,
pool_type=None,
scale_factor=None,
weight_decay=None,
dropout=None,
normalize=None,
frozen=False):
assert warning is None,\
"Specify layer parameters as keyword arguments, not positional arguments."
if type not in ['Rectifier', 'Sigmoid', 'Tanh', 'Linear', 'ExpLin']:
raise NotImplementedError("Convolution type `%s` is not implemented." % (type,))
if border_mode not in ['valid', 'full', 'same']:
raise NotImplementedError("Convolution border_mode `%s` is not implemented." % (border_mode,))
super(Convolution, self).__init__(
type,
name=name,
weight_decay=weight_decay,
dropout=dropout,
normalize=normalize,
frozen=frozen)
self.channels = channels
self.kernel_shape = kernel_shape
self.kernel_stride = kernel_stride or (1,1)
self.border_mode = border_mode
self.pool_shape = pool_shape or (1,1)
self.pool_type = pool_type or ('max' if pool_shape else None)
self.scale_factor = scale_factor or (1,1)
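def _convolution_usage_example():
    """Illustrative sketch only: a 3x3 ``Rectifier`` convolution with 8 output
    channels and 2x2 max pooling, using the parameters documented above."""
    return Convolution("Rectifier", channels=8, kernel_shape=(3, 3),
                       pool_shape=(2, 2), border_mode='valid')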
class NeuralNetwork(object):
"""
Abstract base class for wrapping all neural network functionality from PyLearn2,
common to multi-layer perceptrons in :mod:`sknn.mlp` and auto-encoders in
in :mod:`sknn.ae`.
Parameters
----------
layers: list of Layer
An iterable sequence of each layer each as a :class:`sknn.mlp.Layer` instance that
contains its type, optional name, and any parameters required.
* For hidden layers, you can use the following layer types:
``Rectifier``, ``ExpLin``, ``Sigmoid``, ``Tanh``, or ``Convolution``.
* For output layers, you can use the following layer types:
``Linear`` or ``Softmax``.
It's possible to mix and match any of the layer types, though most often
you should probably use hidden and output types as recommended here. Typically,
the last entry in this ``layers`` list should contain ``Linear`` for regression,
or ``Softmax`` for classification.
random_state: int, optional
Seed for the initialization of the neural network parameters (e.g.
weights and biases). This is fully deterministic.
parameters: list of tuple of array-like, optional
A list of ``(weights, biases)`` tuples to be reloaded for each layer, in the same
order as ``layers`` was specified. Useful for initializing with pre-trained
networks.
learning_rule: str, optional
Name of the learning rule used during stochastic gradient descent,
one of ``sgd``, ``momentum``, ``nesterov``, ``adadelta``, ``adagrad`` or
``rmsprop`` at the moment. The default is vanilla ``sgd``.
learning_rate: float, optional
Real number indicating the default/starting rate of adjustment for
the weights during gradient descent. Different learning rules may
take this into account differently. Default is ``0.01``.
learning_momentum: float, optional
Real number indicating the momentum factor to be used for the
learning rule 'momentum'. Default is ``0.9``.
batch_size: int, optional
Number of training samples to group together when performing stochastic
gradient descent (technically, a "minibatch"). By default each sample is
treated on its own, with ``batch_size=1``. Larger batches are usually faster.
n_iter: int, optional
The number of iterations of gradient descent to perform on the
neural network's weights when training with ``fit()``.
n_stable: int, optional
Number of iterations after which training should return when the validation
error remains (near) constant. This is usually a sign that the data has been
fitted, or that optimization may have stalled. If no validation set is specified,
then stability is judged based on the training error. Default is ``10``.
f_stable: float, optional
Threshold under which the validation error change is assumed to be stable, to
be used in combination with `n_stable`. This is calculated as a relative ratio
of improvement, so if the results are only 0.1% better training is considered
stable. The training set is used as fallback if there's no validation set. Default
is ``0.001``.
valid_set: tuple of array-like, optional
Validation set (X_v, y_v) to be used explicitly while training. Both
arrays should have the same size for the first dimension, and the second
dimension should match with the training data specified in ``fit()``.
valid_size: float, optional
Ratio of the training data to be used for validation. 0.0 means no
validation, and 1.0 would mean there's no training data! Common values are
0.1 or 0.25.
normalize: string, optional
Enable normalization for all layers. Can be either `batch` for batch normalization
or (soon) `weights` for weight normalization. Default is no normalization.
regularize: string, optional
Which regularization technique to use on the weights, for example ``L2`` (most
common) or ``L1`` (quite rare), as well as ``dropout``. By default, there's no
regularization, unless another parameter implies it should be enabled, e.g. if
``weight_decay`` or ``dropout_rate`` are specified.
weight_decay: float, optional
The coefficient used to multiply either ``L1`` or ``L2`` equations when computing
the weight decay for regularization. If ``regularize`` is specified, this defaults
to 0.0001.
dropout_rate: float, optional
What rate to use for drop-out training in the inputs (jittering) and the
hidden layers, for each training example. Specify this as a ratio of inputs
to be randomly excluded during training, e.g. 0.75 means only 25% of inputs
will be included in the training.
loss_type: string, optional
The cost function to use when training the network. There are three valid options:
* ``mse`` — Use mean squared error, for learning to predict the mean of the data.
* ``mae`` — Use mean absolute error, for learning to predict the median of the data.
* ``mcc`` — Use mean categorical cross-entropy, particularly for classifiers.
The default option is ``mse`` for regressors and ``mcc`` for classifiers, but ``mae`` can
only be applied to layers of type ``Linear`` or ``Gaussian`` and they must be used as
the output layer (PyLearn2 only).
callback: callable or dict, optional
An observer mechanism that exposes information about the inner training loop. This is
either a single function that takes ``cbs(event, **variables)`` as a parameter, or a
dictionary of functions indexed by an `event` string that conforms to ``cb(**variables)``.
There are multiple events sent from the inner training loop:
* ``on_train_start`` — Called when the main training function is entered.
* ``on_epoch_start`` — Called the first thing when a new iteration starts.
* ``on_batch_start`` — Called before an individual batch is processed.
* ``on_batch_finish`` — Called after that individual batch is processed.
* ``on_epoch_finish`` — Called as the very last thing when the iteration is done.
* ``on_train_finish`` — Called just before the training function exits.
For each function, the ``variables`` dictionary passed contains all local variables within
the training implementation.
debug: bool, optional
Should the underlying training algorithms perform validation on the data
as it's optimizing the model? This makes things slower, but errors can
be caught more effectively. Default is off.
verbose: bool, optional
How to initialize the logging to display the results during training. If there is
already a logger initialized, either ``sknn`` or the root logger, then this function
does nothing. Otherwise:
* ``False`` — Setup new logger that shows only warnings and errors.
* ``True`` — Setup a new logger that displays all debug messages.
* ``None`` — Don't setup a new logger under any condition (default).
Using the built-in python ``logging`` module, you can control the detail and style of
output by customising the verbosity level and formatter for ``sknn`` logger.
warning: None
You should use keyword arguments after `layers` when initializing this object. If not,
the code will raise an AssertionError.
"""
def __init__(
self,
layers,
warning=None,
parameters=None,
random_state=None,
learning_rule='sgd',
learning_rate=0.01,
learning_momentum=0.9,
normalize=None,
regularize=None,
weight_decay=None,
dropout_rate=None,
batch_size=1,
n_iter=None,
n_stable=10,
f_stable=0.001,
valid_set=None,
valid_size=0.0,
loss_type=None,
callback=None,
debug=False,
verbose=None,
**params):
assert warning is None,\
"Specify network parameters as keyword arguments, not positional arguments."
self.layers = []
for i, layer in enumerate(layers):
assert isinstance(layer, Layer) or isinstance(layer, Native),\
"Specify each layer as an instance of a `sknn.mlp.Layer` object."
# Layer names are optional, if not specified then generate one.
if layer.name is None:
layer.name = ("hidden%i" % i) if i < len(layers)-1 else "output"
# sklearn may pass layers in as additional named parameters, remove them.
if layer.name in params:
del params[layer.name]
self.layers.append(layer)
# Don't support any additional parameters that are not in the constructor.
# These are specified only so `get_params()` can return named layers, for double-
# underscore syntax to work.
assert len(params) == 0,\
"The specified additional parameters are unknown: %s." % ','.join(params.keys())
# Basic checking of the freeform string options.
assert regularize in (None, 'L1', 'L2', 'dropout'),\
"Unknown type of regularization specified: %s." % regularize
assert loss_type in ('mse', 'mae', 'mcc', None),\
"Unknown loss function type specified: %s." % loss_type
self.weights = parameters
self.random_state = random_state
self.learning_rule = learning_rule
self.learning_rate = learning_rate
self.learning_momentum = learning_momentum
self.normalize = normalize
self.regularize = regularize or ('dropout' if dropout_rate else None)\
or ('L2' if weight_decay else None)
self.weight_decay = weight_decay
self.dropout_rate = dropout_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.n_stable = n_stable
self.f_stable = f_stable
self.valid_set = valid_set
self.valid_size = valid_size
self.loss_type = loss_type
self.debug = debug
self.verbose = verbose
self.callback = callback
self.auto_enabled = {}
self._backend = None
self._create_logger()
self._setup()
def _setup(self):
raise NotImplementedError("NeuralNetwork is an abstract class; "
"use the mlp.Classifier or mlp.Regressor instead.")
@property
def is_initialized(self):
"""Check if the neural network was setup already.
"""
return self._backend is not None and self._backend.is_initialized
def is_convolution(self, input=None, output=False):
"""Check whether this neural network includes convolution layers in the first
or last position.
Parameters
----------
input : boolean, optional
Whether the first layer should be checked for convolution. Default True.
output : boolean, optional
Whether the last layer should be checked for convolution. Default False.
Returns
-------
is_conv : boolean
True if either of the specified layers is indeed a convolution layer, False otherwise.
"""
check_output = output
check_input = False if check_output and input is None else True
i = check_input and isinstance(self.layers[0], Convolution)
o = check_output and isinstance(self.layers[-1], Convolution)
return i or o
@property
def is_classifier(self):
"""Is this neural network instanced as a classifier or regressor?"""
return False
def _create_logger(self):
# If users have configured logging already, assume they know best.
if len(log.handlers) > 0 or len(log.parent.handlers) > 0 or self.verbose is None:
return
# Otherwise setup a default handler and formatter based on verbosity.
lvl = logging.DEBUG if self.verbose else logging.WARNING
fmt = logging.Formatter("%(message)s")
hnd = logging.StreamHandler(stream=sys.stdout)
hnd.setFormatter(fmt)
hnd.setLevel(lvl)
log.addHandler(hnd)
log.setLevel(lvl)
def get_parameters(self):
"""Extract the neural networks weights and biases layer by layer. Only valid
once the neural network has been initialized, for example via `fit()` function.
Returns
-------
params : list of tuples
For each layer in the order they are passed to the constructor, a named-tuple
of three items `weights`, `biases` (both numpy arrays) and `name` (string)
in that order.
"""
assert self._backend is not None,\
"Backend was not initialized; could not retrieve network parameters."
P = collections.namedtuple('Parameters', 'weights biases layer')
return [P(w, b, s.name) for s, (w, b) in zip(self.layers, self._backend._mlp_to_array())]
def set_parameters(self, storage):
"""Store the given weighs and biases into the neural network. If the neural network
has not been initialized, use the `weights` list as construction parameter instead.
Otherwise if the neural network is initialized, this function will extract the parameters
from the input list or dictionary and store them accordingly.
Parameters
----------
storage : list of tuples, or dictionary of tuples
Either a list of tuples for each layer, storing two items `weights` and `biases` in
the exact same order as construction. Alternatively, if this is a dictionary, a string
to tuple mapping for each layer also storing `weights` and `biases` but not necessarily
for all layers.
"""
# In case the class is not initialized, store the parameters for later during _initialize.
if self._backend is None:
self.weights = storage
return
if isinstance(storage, dict):
layers = [storage.get(l.name, None) for l in self.layers]
else:
layers = storage
return self._backend._array_to_mlp(layers, self._backend.mlp)
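# Illustrative usage sketch (assumes the concrete ``Regressor`` and ``Layer``
# classes exported from ``sknn.mlp``; ``X_train`` and ``y_train`` are hypothetical):
#
#     from sknn.mlp import Regressor, Layer
#     nn = Regressor(layers=[Layer("Rectifier", units=100),
#                            Layer("Linear")],
#                    learning_rate=0.01, n_iter=25, valid_size=0.1)
#     nn.fit(X_train, y_train)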
| apache-2.0 |
weixuanfu/tpot | tests/test_log_file.py | 1 | 3249 | # -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson ([email protected])
- Weixuan Fu ([email protected])
- Daniel Angell ([email protected])
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
from tpot import TPOTClassifier
from sklearn.datasets import load_iris
from nose.tools import assert_equal, assert_true
import os
import re
from tempfile import mkdtemp
from shutil import rmtree
data = load_iris()
X = data['data']
y = data['target']
POP_SIZE = 2
GEN_SIZE = 2
def test_log_file_verbosity_1():
""" Set verbosity as 1. Assert log_file parameter to generate log file. """
cachedir = mkdtemp()
file_name = cachedir + "progress_verbose_1.log"
tracking_progress_file = open(file_name, "w")
tpot_obj = TPOTClassifier(
population_size=POP_SIZE,
generations=GEN_SIZE,
verbosity=1,
log_file=tracking_progress_file
)
tpot_obj.fit(X, y)
assert_equal(os.path.getsize(file_name), 0)
rmtree(cachedir)
def test_log_file_verbosity_2():
""" Set verbosity as 2. Assert log_file parameter to generate log file. """
cachedir = mkdtemp()
file_name = cachedir + "progress_verbose_2.log"
tracking_progress_file = open(file_name, "w")
tpot_obj = TPOTClassifier(
population_size=POP_SIZE,
generations=GEN_SIZE,
verbosity=2,
log_file=tracking_progress_file
)
tpot_obj.fit(X, y)
assert_equal(os.path.getsize(file_name) > 0, True)
check_generations(file_name, GEN_SIZE)
rmtree(cachedir)
def test_log_file_verbose_3():
""" Set verbosity as 3. Assert log_file parameter to generate log file. """
cachedir = mkdtemp()
file_name = cachedir + "progress_verbosity_3.log"
tracking_progress_file = open(file_name, "w")
tpot_obj = TPOTClassifier(
population_size=POP_SIZE,
generations=GEN_SIZE,
verbosity=3,
log_file=tracking_progress_file
)
tpot_obj.fit(X, y)
assert_equal(os.path.getsize(file_name) > 0, True)
check_generations(file_name, GEN_SIZE)
rmtree(cachedir)
def check_generations(file_name, generations):
""" Assert generation log message is present in log_file. """
with open(file_name, "r") as file:
file_text = file.read()
for gen in range(generations):
assert_true(re.search("Generation {0} - .+".format(gen+1), file_text))
| lgpl-3.0 |
mikeireland/chronostar | projects/scocen/prepare_component_members_for_further_splitting.py | 1 | 2931 | """
Take members of a component and write them in a separate table.
I would like to run chronostar just on this and split the data further
into more components.
"""
import numpy as np
from astropy.table import Table
import matplotlib.pyplot as plt
############################################
# Some things are the same for all the plotting scripts and we put
# this into a single library to avoid confusion.
import scocenlib as lib
data_filename = lib.data_filename
############################################
# Minimal probability required for membership
#~ pmin_membership = 0.01
#~ pmin_membership = 0.2
#~ pmin_membership = 0.5
############################################
# Read data
try:
tab=tab0
except:
tab0 = Table.read(data_filename)
tab=tab0
######################################################
# Component with PDS 70, there are many subcomponents in there
#~ mask = tab['membershipT'] > 0.01
#~ print(np.sum(mask))
#~ tab[mask].write('data/starsT_for_splitting_DR2.fits', overwrite=True)
#~ ######################################################
#~ # Component with MS and PMS sequence
#~ mask = tab['membershipB'] > 0.5
#~ print(np.sum(mask))
#~ tab[mask].write('data/starsB_for_splitting_DR2.fits', overwrite=True)
#~ ######################################################
# Component with MS and PMS sequence
mask = tab['membershipJ'] > 0.2
#~ #mask = tab['best_component_50']=='J'
print(np.sum(mask))
tab[mask].write('data/starsJ_for_splitting_DR2.fits', overwrite=True)
######################################################
#~ # Component with MS and PMS sequence
#~ mask = tab['membershipQ'] > 0.5
#mask = tab['best_component_50']=='Q'
#~ print(np.sum(mask))
#~ tab[mask].write('data/starsQ_for_splitting_DR2.fits', overwrite=True)
#~ ######################################################
#~ plt.scatter(tab['l'][mask], tab['b'][mask], s=1)
#~ plt.gca().invert_xaxis()
# Choose which cartesian dimensions you wish to plot
dims = [('X','Y'), ('U','V'), ('X','U'), ('Z','W')]
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 8))
fig.set_tight_layout(True)
tab=tab[mask]
for ax, (dim1, dim2) in zip(axes.flatten(), dims):
# If the dimensions are velocity, mask out RV-less stars
if np.any(np.in1d([dim1, dim2], ['U', 'V', 'W'])):
maskrv = tab['radial_velocity_error']<100
t = tab[maskrv]
else:
t = tab
#~ print(t)
# Plot all stars in the table
ax.scatter(t[dim1], t[dim2], c='k', alpha=1, s=1, label='')
# Pretty plot
ax.tick_params(direction='in')
if dim1 in ['X', 'Y', 'Z']:
unit1 = 'pc'
else:
unit1 = 'km/s'
if dim2 in ['X', 'Y', 'Z']:
unit2 = 'pc'
else:
unit2 = 'km/s'
ax.set_xlabel('{} [{}]'.format(dim1, unit1))
ax.set_ylabel('{} [{}]'.format(dim2, unit2))
if dim1=='X' and dim2=='U':
ax.legend(loc=2)
plt.show()
| mit |
anntzer/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 73 | 3659 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Points that are neighboring often share the same leaf of a tree and therefore
share large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components
of the transformed data with truncated SVD.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forests learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result after dimensionality reduction using truncated SVD
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50, edgecolor='k')
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50, edgecolor='k')
ax.set_title("Truncated SVD reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color
# to each point in the mesh [x_min, x_max]x[y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50, edgecolor='k')
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50, edgecolor='k')
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
thorwhalen/ut | stats/classification/bin/metrics.py | 1 | 9886 | __author__ = 'thor'
"""
Utilities to measure binary classification performance based on the confusion matrix.
Definitions taken from http://en.wikipedia.org/wiki/Confusion_matrix.
Author: Thor Whalen
"""
from numpy import *
import sklearn as sk
import matplotlib.pyplot as plt
# metric_mat: dict of matrices which produce specific count metrics when "scalar multiplied" with a confusion matrix
metric_mat = {
'tp': array([[0, 0], [0, 1]]),
'tn': array([[1, 0], [0, 0]]),
'fp': array([[0, 1], [0, 0]]),
'fn': array([[0, 0], [1, 0]])
}
metric_mat.update({
'p': metric_mat['tp'] + metric_mat['fn'],
'n': metric_mat['tn'] + metric_mat['fp'],
})
metric_mat.update({
'total': metric_mat['p'] + metric_mat['n']
})
# rate_metric_mats: dict of pairs of matrices to produce specific "rate metrics"
# Both elements of the pair should be "scalar multiplied" (i.e. sum(A * B) in numpy) by the confusion matrix,
# and then the first result divided by the second to get the rate.
rate_metric_mats = {
'recall': (metric_mat['tp'], metric_mat['p']),
'specificity': (metric_mat['tn'], metric_mat['n']),
'precision': (metric_mat['tp'], metric_mat['tp'] + metric_mat['fp']),
'negative_predictive_value': (metric_mat['tn'], metric_mat['tn'] + metric_mat['fn']),
'fall_out': (metric_mat['fp'], metric_mat['n']),
'false_discovery_rate': (metric_mat['fp'], metric_mat['fp'] + metric_mat['tp']),
'miss_rate': (metric_mat['fn'], metric_mat['fn'] + metric_mat['tp']),
'accuracy': (metric_mat['tp'] + metric_mat['tn'], metric_mat['total']),
'f1_score': (2 * metric_mat['tp'], 2 * metric_mat['tp'] + metric_mat['fp'] + metric_mat['fn'])
}
alternative_rate_metric_names = {
'recall': ['sensitivity', 'true_positive_rate', 'TPR', 'hit_rate'],
'specificity': ['SPC', 'true_negative_rate'],
'precision': ['positive_predictive_value', 'PPV'],
'negative_predictive_value': ['NPV'],
'fall_out': ['false_positive_rate'],
'false_discovery_rate': ['FDR'],
'miss_rate': ['false_negative_rate', 'FNR'],
'accuracy': ['ACC'],
'f1_score': ['f1']
}
for root_name, alternatives in alternative_rate_metric_names.items():
for alt in alternatives:
rate_metric_mats.update({alt: rate_metric_mats[root_name]})
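def _recall_from_confusion_matrix_example():
    """Illustrative worked example: with cm = [[tn, fp], [fn, tp]] (the sklearn
    convention used throughout this module), recall = tp / (tp + fn)."""
    cm = array([[50, 10], [5, 35]])
    numerator_mat, denominator_mat = rate_metric_mats['recall']
    return sum(numerator_mat * cm) / float(sum(denominator_mat * cm))  # 35 / 40 = 0.875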
class SingleMetricGauger(object):
def __init__(self, actual, probs, prob_thresh=100, metric=None,
percentile_thresh=False, **kwargs):
self.metric_mat = metric_mat
self.metric_name = kwargs.get('metric_name', None)
self.prob_name = kwargs.get('prob_name', 'probability')
self.rate_metric_mats = rate_metric_mats
self.actual = actual
self.probs = probs
self.prob_thresh = prob_thresh
if isinstance(prob_thresh, int):
self.prob_thresh = linspace(start=0, stop=1, num=prob_thresh)
if percentile_thresh: # interpret prob_thresh as percentiles of y_preds
self.prob_thresh = percentile(self.probs, list(100 * self.prob_thresh))
self.prob_thresh = array(self.prob_thresh)
if isinstance(metric, str):
self.metric_name = self.metric_name or metric
self.metric = rate_metric_mats[metric]
else:
self.metric = metric
if kwargs.get('compute_metric', None) is not None:
self.compute_metric = kwargs.get('compute_metric')
else:
if isinstance(self.metric, tuple) and len(self.metric) == 2:
self.compute_metric = compute_rate_metric
elif shape(self.metric) == (2, 2):
# consider this as confusion_matrix weights, to be dotted with the confusion matrix and summed
self.compute_metric = dot_and_sum
self.last_gauge = None
@staticmethod
def mk_single_metric_gauger_with_mean_pred(actual, **kwargs):
return SingleMetricGauger(actual, probs=mean(actual) * ones((shape(actual))), **kwargs)
@staticmethod
def mk_profit_gauger(actual, probs, cost_of_trial, revenue_of_success, **kwargs):
"""
This gauger emulates the situation where we bet on all items above the probability threshold, incurring a cost
of cost_of_trial for every such item, and gaining revenue_of_success for every item that succeeds.
That is, the gauge is the profit:
tp * revenue_of_success - (fp + tp) * cost_of_trial
"""
cost_of_trial = abs(cost_of_trial)
kwargs = dict({'metric_name': 'profit'}, **kwargs)
return SingleMetricGauger(actual, probs,
metric=array([[0, -cost_of_trial], [0, revenue_of_success - cost_of_trial]]),
compute_metric=dot_and_sum, **kwargs)
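# Descriptive note: summing this weight matrix against the confusion matrix gives
# fp * (-cost_of_trial) + tp * (revenue_of_success - cost_of_trial), which equals
# tp * revenue_of_success - (fp + tp) * cost_of_trial, as described in the docstring.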
def compute_metric_for_thresh(self, thresh, metric):
return self.compute_metric(self.confusion_matrix_for_thresh(thresh), metric)
def confusion_matrix_for_thresh(self, thresh):
return confusion_matrix(
y_true=self.actual,
y_pred=binary_prediction_from_probs_and_thresh(self.probs, thresh))
def gauge(self, metric=None, prob_thresh=None):
if metric is not None:
self.metric = metric
if prob_thresh is not None:
self.prob_thresh = prob_thresh
self.last_gauge = [self.compute_metric_for_thresh(thresh, self.metric)
for thresh in self.prob_thresh]
return self.last_gauge
def get_gauge(self, metric=None, prob_thresh=None, recompute=False):
if recompute or self.last_gauge is None:
self.gauge(metric=metric, prob_thresh=prob_thresh)
return self.last_gauge
def plot(self, *args, **kwargs):
plt.plot(self.prob_thresh, self.last_gauge, *args, **kwargs)
plt.xlabel(self.prob_name + ' threshold')
plt.ylabel(self.metric_name)
def set_metric(self, metric):
if isinstance(metric, str):
self.metric = rate_metric_mats[metric]
else:
self.metric = metric
class MultipleMetricGaugers():
def __init__(self, actual, probs, prob_thresh=100, metrics=['precision', 'recall'],
percentile_thresh=False, **kwargs):
self.gauger = list()
for m in metrics:
self.gauger.append(SingleMetricGauger(actual, probs, prob_thresh, metric=m,
percentile_thresh=percentile_thresh, **kwargs))
def plot_metric_against_another(self, i=0, j=1, *args, **kwargs):
plt.plot(self.gauger[i].get_gauge(), self.gauger[j].get_gauge(), *args, **kwargs)
if self.gauger[i].metric_name:
plt.xlabel(self.gauger[i].metric_name)
if self.gauger[j].metric_name:
plt.ylabel(self.gauger[j].metric_name)
plt.grid()
return plt.gca()
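# Illustrative usage sketch (``y_true`` and ``y_scores`` are hypothetical arrays
# of binary labels and predicted probabilities):
#
#     mmg = MultipleMetricGaugers(actual=y_true, probs=y_scores,
#                                 metrics=['precision', 'recall'])
#     mmg.plot_metric_against_another(0, 1)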
def dot_and_sum(cm, metric):
return sum(metric * cm)
def compute_rate_metric(cm, metric):
return sum(metric[0] * cm) / float(sum(metric[1] * cm))
def binary_prediction_from_probs_and_thresh(probas, thresh):
return array(probas >= thresh, dtype=float)
def confusion_matrix(y_true, y_pred, labels=None):
cm = sk.metrics.confusion_matrix(y_true, y_pred, labels)
if shape(cm) == (1, 1): # bug in sk.metrics.confusion_matrix with all true or all false inputs
if all(y_true):
return array([[0, 0], [0, len(y_true)]])
elif not any(y_true):
return array([[len(y_true), 0], [0, 0]])
else:
return cm
def sensitivity(cm):
"""
sensitivity or true positive rate (TPR), hit rate, recall
TPR = TP / P = TP / (TP+FN)
"""
return cm[1][1] / float(cm[1][1] + cm[1][0])
def recall(cm):
"""
recall or sensitivity or true positive rate (TPR), hit rate
TPR = TP / P = TP / (TP+FN)
"""
return cm[1][1] / float(cm[1][0] + cm[1][1])
def specificity(cm):
"""
specificity (SPC) or True Negative Rate
SPC = TN / N = TN / (FP + TN)
"""
return cm[0][0] / float(cm[0][0] + cm[0][1])
def precision(cm):
"""
precision or positive predictive value (PPV)
PPV = TP / (TP + FP)
"""
t = cm[1][1] / float(cm[1][1] + cm[0][1])
if isnan(t):
return 0.0
else:
return t
def negative_predictive_value(cm):
"""
negative predictive value (NPV)
NPV = TN / (TN + FN)
"""
return cm[0][0] / float(cm[0][0] + cm[1][0])
def fall_out(cm):
"""
fall-out or false positive rate (FPR)
FPR = FP / N = FP / (FP + TN)
"""
return cm[0][1] / float(cm[0][0] + cm[0][1])
def false_discovery_rate(cm):
"""
false discovery rate (FDR)
FDR = FP / (FP + TP) = 1 - PPV
"""
return cm[0][1] / float(cm[0][1] + cm[1][1])
def miss_rate(cm):
"""
Miss Rate or False Negative Rate (FNR)
FNR = FN / (FN + TP)
"""
return cm[1][0] / float(cm[1][0] + cm[1][1])
def accuracy(cm):
"""
accuracy (ACC)
ACC = (TP + TN) / (P + N)
"""
return (cm[1][1] + cm[0][0]) / float(sum(cm))
def f1_score(cm):
"""
F1 score is the harmonic mean of precision and sensitivity
F1 = 2 TP / (2 TP + FP + FN)
"""
return 2 * cm[1][1] / float(2 * cm[1][1] + cm[0][1] + cm[1][0])
def matthews_correlation_coefficient(cm):
"""
Matthews correlation coefficient (MCC)
MCC = \frac{TP \times TN - FP \times FN}{\sqrt{(TP+FP)(TP+FN)(TN+FP)(TN+FN)}}
"""
return (cm[1][1] * cm[0][0] - cm[0][1] * cm[1][0]) \
/ sqrt((cm[1][1] + cm[0][1]) * (cm[1][1] + cm[1][0]) * (cm[0][0] + cm[0][1]) * (cm[0][0] + cm[1][0]))
def informedness(cm):
return sensitivity(cm) + specificity(cm) - 1
def markedness(cm):
return precision(cm) + negative_predictive_value(cm) - 1
def tn(cm):
return cm[0][0]
def tp(cm):
return cm[1][1]
def fp(cm):
return cm[0][1]
def fn(cm):
return cm[1][0]
| mit |
aabadie/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 42 | 20925 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = assert_warns(DeprecationWarning, estimator.decision_function, X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
    # Now test the positive option for all the estimator classes.
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
    # This test is basically a copy of the above with the additional positive
    # option. However, for the middle part (the comparison of coefficient values
    # over a range of alphas) we had to make some adaptations. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
posborne/mlcollection | mlcollection/simple_rl.py | 1 | 2495 | '''
Created on Mar 23, 2010
@author: Zachary Varberg
'''
from numpy import *
import scipy as Sci
import scipy.linalg
import copy
import matplotlib.pyplot as pyplot
import rl_base
class Simple_RL(rl_base.Base_RL):
def __init__(self, alpha, gamma, num_states=(15,15)):
self.num_states = num_states[0]*num_states[1]
self.dimensions = num_states
self.alpha = alpha
self.gamma = gamma
self.curr_state = self.get_start_state()
self.num_actions = 4
self.Q_mat = zeros((self.num_states, self.num_actions))
self.trans_mat = {0:-15, 1:1, 2:15, 3:-1}
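        # Action encoding (assumed from the offsets): 0 moves up a row (-15),
        # 2 moves down a row (+15), 1 moves right (+1) and 3 moves left (-1)
        # on the flattened 15 x 15 grid of states.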
def get_start_state(self):
return random.randint(0,self.num_states)
def get_reward(self, prev_state, action):
if self.prev_state + self.trans_mat[action] == (self.num_states*3/4):
return 10
return -1
def select_action(self, state):
best_action = nonzero(self.Q_mat[state]==max(self.Q_mat[state]))[0]
return best_action[random.randint(0,len(best_action))] if random.random() < .9 else random.randint(0,self.num_actions)
def execute_action(self, state, action):
move = self.trans_mat[action]
self.prev_state = state
r = self.get_reward(self.prev_state, action)
self.curr_state = min(max(self.curr_state + move, 0),self.num_states-1)
if self.prev_state % self.dimensions[0] == 0 and self.curr_state % self.dimensions[0] == 14:
self.curr_state -= 1
if self.prev_state % self.dimensions[0] == 14 and self.curr_state % self.dimensions[0] == 0:
self.curr_state += 1
self.Q_mat[self.prev_state,action] = (
self.Q_mat[self.prev_state,action]*(1-self.alpha) +
(r + self.gamma*max(self.Q_mat[self.curr_state]))*self.alpha)
if r == 10:
return True
return False
def display(self):
fig1 = pyplot.figure(1)
pyplot.plot([(sum(self.tot_reward[x-100:x])/100) if x >=100 else (sum(self.tot_reward[0:x])/x) for x in xrange(self.num_trials)])
fig1.suptitle("Rewards")
fig2 = pyplot.figure(2)
pyplot.plot([(sum(self.tot_steps[x-100:x])/100) if x >=100 else (sum(self.tot_steps[0:x])/x) for x in xrange(self.num_trials)])
fig2.suptitle("Steps")
pyplot.show()
if __name__=="__main__":
rl = Simple_RL(.1,.95)
rl.run(1000)
print rl.Q_mat | mit |
mattgiguere/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
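# The toy-problem helpers below generate data from a known linear model and then
# inject a few gross outliers, so that the robust Theil-Sen estimates can be
# compared against an ordinary least-squares fit in the tests that follow.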
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
carefree0910/MachineLearning | _Dist/TextClassification/SkRun.py | 1 | 2860 | import os
import math
import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import metrics
from _SKlearn.NaiveBayes import SKMultinomialNB
from _SKlearn.SVM import SKSVM, SKLinearSVM
from _Dist.TextClassification.GenDataset import gen_dataset
from Util.ProgressBar import ProgressBar
def main(clf):
dat_path = os.path.join("_Data", "dataset.dat")
gen_dataset(dat_path)
with open(dat_path, "rb") as _file:
x, y = pickle.load(_file)
x = [" ".join(sentence) for sentence in x]
_indices = np.random.permutation(len(x))
x = list(np.array(x)[_indices])
y = list(np.array(y)[_indices])
data_len = len(x)
batch_size = math.ceil(data_len * 0.1)
acc_lst, y_results = [], []
bar = ProgressBar(max_value=10, name=str(clf))
for i in range(10):
_next = (i + 1) * batch_size if i != 9 else data_len
x_train = x[:i * batch_size] + x[(i + 1) * batch_size:]
y_train = y[:i * batch_size] + y[(i + 1) * batch_size:]
x_test, y_test = x[i * batch_size:_next], y[i * batch_size:_next]
count_vec = CountVectorizer()
counts_train = count_vec.fit_transform(x_train)
x_test = count_vec.transform(x_test)
tfidf_transformer = TfidfTransformer()
x_train = tfidf_transformer.fit_transform(counts_train)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
acc_lst.append(clf.acc(y_test, y_pred))
y_results.append([y_test, y_pred])
del x_train, y_train, x_test, y_test, y_pred
bar.update()
return acc_lst, y_results
def run(clf):
acc_records, y_records = [], []
bar = ProgressBar(max_value=10, name="Main")
for _ in range(10):
if clf == "Naive Bayes":
_clf = SKMultinomialNB(alpha=0.1)
elif clf == "Non-linear SVM":
_clf = SKSVM()
else:
_clf = SKLinearSVM()
rs = main(_clf)
acc_records.append(rs[0])
y_records += rs[1]
bar.update()
acc_records = np.array(acc_records) * 100
plt.figure()
plt.boxplot(acc_records, vert=False, showmeans=True)
plt.show()
from Util.DataToolkit import DataToolkit
idx = np.argmax(acc_records) # type: int
print(metrics.classification_report(y_records[idx][0], y_records[idx][1], target_names=np.load(os.path.join(
"_Data", "LABEL_DIC.npy"
))))
toolkit = DataToolkit(acc_records[np.argmax(np.average(acc_records, axis=1))])
print("Acc Mean : {:8.6}".format(toolkit.mean))
print("Acc Variance : {:8.6}".format(toolkit.variance))
print("Done")
if __name__ == '__main__':
run("SVM")
| mit |
mne-tools/mne-tools.github.io | dev/_downloads/c425a1746b1a9268504279d1bf5d0d61/decoding_csp_timefreq.py | 6 | 6487 | """
.. _ex-decoding-csp-eeg-timefreq:
====================================================================
Decoding in time-frequency space using Common Spatial Patterns (CSP)
====================================================================
The time-frequency decomposition is estimated by iterating over raw data that
has been band-passed at different frequencies. This is used to compute a
covariance matrix over each epoch or a rolling time-window and extract the CSP
filtered signals. A linear discriminant classifier is then applied to these
signals.
"""
# Authors: Laura Gwilliams <[email protected]>
# Jean-Remi King <[email protected]>
# Alex Barachant <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne import Epochs, create_info, events_from_annotations
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
from mne.time_frequency import AverageTFR
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
###############################################################################
# Set parameters and read data
event_id = dict(hands=2, feet=3) # motor imagery: hands vs feet
subject = 1
runs = [6, 10, 14]
raw_fnames = eegbci.load_data(subject, runs)
raw = concatenate_raws([read_raw_edf(f) for f in raw_fnames])
# Extract information from the raw file
sfreq = raw.info['sfreq']
events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))
raw.pick_types(meg=False, eeg=True, stim=False, eog=False, exclude='bads')
raw.load_data()
# Assemble the classifier using scikit-learn pipeline
clf = make_pipeline(CSP(n_components=4, reg=None, log=True, norm_trace=False),
LinearDiscriminantAnalysis())
n_splits = 5 # how many folds to use for cross-validation
cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)
# Classification & time-frequency parameters
tmin, tmax = -.200, 2.000
n_cycles = 10. # how many complete cycles: used to define window size
min_freq = 5.
max_freq = 25.
n_freqs = 8 # how many frequency bins to use
# Assemble list of frequency range tuples
freqs = np.linspace(min_freq, max_freq, n_freqs) # assemble frequencies
freq_ranges = list(zip(freqs[:-1], freqs[1:])) # make freqs list of tuples
# Infer window spacing from the max freq and number of cycles to avoid gaps
window_spacing = (n_cycles / np.max(freqs) / 2.)
centered_w_times = np.arange(tmin, tmax, window_spacing)[1:]
n_windows = len(centered_w_times)
# Instantiate label encoder
le = LabelEncoder()
###############################################################################
# Loop through frequencies, apply classifier and save scores
# init scores
freq_scores = np.zeros((n_freqs - 1,))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
# Infer window size based on the frequency being used
w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds
# Apply band-pass filter to isolate the specified frequencies
raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
skip_by_annotation='edge')
# Extract epochs from filtered data, padded by window size
epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
proj=False, baseline=None, preload=True)
epochs.drop_bad()
y = le.fit_transform(epochs.events[:, 2])
X = epochs.get_data()
# Save mean scores over folds for each frequency and time window
freq_scores[freq] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
scoring='roc_auc', cv=cv,
n_jobs=1), axis=0)
###############################################################################
# Plot frequency results
plt.bar(freqs[:-1], freq_scores, width=np.diff(freqs)[0],
align='edge', edgecolor='black')
plt.xticks(freqs)
plt.ylim([0, 1])
plt.axhline(len(epochs['feet']) / len(epochs), color='k', linestyle='--',
label='chance level')
plt.legend()
plt.xlabel('Frequency (Hz)')
plt.ylabel('Decoding Scores')
plt.title('Frequency Decoding Scores')
###############################################################################
# Loop through frequencies and time, apply classifier and save scores
# init scores
tf_scores = np.zeros((n_freqs - 1, n_windows))
# Loop through each frequency range of interest
for freq, (fmin, fmax) in enumerate(freq_ranges):
# Infer window size based on the frequency being used
w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds
# Apply band-pass filter to isolate the specified frequencies
raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin',
skip_by_annotation='edge')
# Extract epochs from filtered data, padded by window size
epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size,
proj=False, baseline=None, preload=True)
epochs.drop_bad()
y = le.fit_transform(epochs.events[:, 2])
# Roll covariance, csp and lda over time
for t, w_time in enumerate(centered_w_times):
# Center the min and max of the window
w_tmin = w_time - w_size / 2.
w_tmax = w_time + w_size / 2.
# Crop data into time-window of interest
X = epochs.copy().crop(w_tmin, w_tmax).get_data()
# Save mean scores over folds for each frequency and time window
tf_scores[freq, t] = np.mean(cross_val_score(estimator=clf, X=X, y=y,
scoring='roc_auc', cv=cv,
n_jobs=1), axis=0)
###############################################################################
# Plot time-frequency results
# Set up time frequency object
av_tfr = AverageTFR(create_info(['freq'], sfreq), tf_scores[np.newaxis, :],
centered_w_times, freqs[1:], 1)
chance = np.mean(y) # set chance level to white in the plot
av_tfr.plot([0], vmin=chance, title="Time-Frequency Decoding Scores",
cmap=plt.cm.Reds)
| bsd-3-clause |
mjgrav2001/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
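# Illustrative example: for a 2 x 2 x 1 grid the vertices are numbered
#   0 1
#   2 3
# and _make_edges_3d(2, 2) returns the four 4-connectivity edges
# (0, 1), (2, 3), (0, 2) and (1, 3) as a (2, 4) array.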
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
        The dtype of the returned sparse matrix. By default it is the
        dtype of img.
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
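# Illustrative example: for a 2 x 2 grayscale image, img_to_graph returns a
# 4 x 4 sparse matrix whose off-diagonal entries are the absolute intensity
# differences between 4-connected pixels and whose diagonal holds the pixel
# values themselves.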
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
        The dtype of the returned sparse matrix. By default it is int.
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
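# Illustrative example: a 100 x 100 image with 8 x 8 patches admits
# (100 - 8 + 1) ** 2 = 8649 patches in total; with max_patches=0.1 this
# function returns int(0.1 * 8649) = 864.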
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
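# Illustrative example: for arr of shape (10, 10) and patch_shape=3, the
# returned view has shape (8, 8, 3, 3); calling patches.reshape(-1, 3, 3)
# then copies the data into 64 separate 3 x 3 patches.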
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
        the channel: an RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling to use if
`max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
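# Note: extracting all patches with extract_patches_2d and feeding them back
# through reconstruct_from_patches_2d with the original image size recovers
# the input image (up to floating point rounding), since every pixel is
# averaged over the patches that cover it.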
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
| bsd-3-clause |
jzt5132/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
lpryszcz/bin | bed2cnv.py | 1 | 3190 | #!/usr/bin/env python
desc="""Report CNVs from read counts in genome windows (BED).
"""
epilog="""Author:
Leszek Pryszcz
[email protected]
Mizerow, 01/02/2014
"""
import argparse, subprocess, os, sys
import numpy as np
import pandas as pd
from datetime import datetime
def load_bed(files):
"""Load BED file into Pandas object"""
for i, file in enumerate(files):
strain = file.name.split('.')[0] #".".join(file.name.split('.')[:2])
if i:
bed = pd.merge(bed, pd.read_table(file, names=('chr', 'start', 'end', strain)), \
on=('chr','start','end')) #how='outer'
else:
bed = pd.read_table(file, names=('chr','start','end',strain))
#sort by chromosome position
bed = bed.sort(columns=('chr','start','end'))
return bed
def bed2cnv(files, out, alpha, verbose):
"""Report deletions/duplications at given threshold"""
if verbose:
sys.stderr.write("Loading BED...\n")
#load BED files
bed = load_bed(files)
if verbose:
sys.stderr.write("Normalising and selecting CNVs...\n")
#normalise
deletions = duplications = np.array([False]*len(bed))
for strain in bed.columns[3:]:
#reads per 1kb window
bed[strain] = bed[strain] * 1000.0 / (bed.end - bed.start)
sys.stderr.write("%s %.2f %.2f\n"%(strain, bed[strain].mean(), bed[strain].std()))
#observed / expected
bed[strain] = np.log2(bed[strain] / bed[strain].mean())
#get deletions
dels = bed[strain] < bed[strain].quantile(0.0+alpha/2)
deletions = deletions + dels
#get duplications
dups = bed[strain] > bed[strain].quantile(1.0-alpha/2)
#dups.nonzero()
duplications = duplications + dups
sys.stderr.write("%s %s %s\n"%(strain, len(dels.nonzero()[0]), len(dups.nonzero()[0])))
#select cnvs
sys.stderr.write("Saving %s deletions and %s duplications.\n" % (deletions.tolist().count(True), duplications.tolist().count(True)))
bed[duplications + deletions].to_excel(out, sheet_name='CNVs', index=0)
bed[duplications + deletions].to_csv(out+'.tsv', sep='\t', header=0, index=0)
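# Example invocation (one read-count BED file per strain, counts in column 4):
#   python bed2cnv.py -i strainA.bed strainB.bed strainC.bed -o cnvs.xls -a 0.05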
def main():
usage = "%(prog)s [options] -i bed1 bed2 bed3"
parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog)
parser.add_argument("-v", "--verbose", default=False, action="store_true")
parser.add_argument("--version", action="version", version='%(prog)s 0.1')
parser.add_argument("-i", "--inputs", nargs="+", type=file,
help="input file(s)")
parser.add_argument("-o", "--output", default='out.xls',
help="output xls file")
parser.add_argument("-a", "--alpha", default=0.05,
help="alpha to call CNVs")
o = parser.parse_args()
bed2cnv(o.inputs, o.output, o.alpha, o.verbose)
if __name__ == '__main__':
    t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
    dt = datetime.now() - t0
sys.stderr.write("## Time elapsed: %s\n" % dt)
| gpl-3.0 |
compops/pmh-joe2015 | scripts-mwe/mwe-gaussian-iid-1parameter.py | 2 | 4164 | ##############################################################################
# Minimal working example
# Parameter inference in Gaussian IID model
# using correlated psuedo-marginal Metropolis-Hastings
#
# (c) Johan Dahlin 2016 ( johan.dahlin (at) liu.se )
##############################################################################
import numpy as np
import matplotlib.pylab as plt
from state import smc
from para import pmh_correlatedRVs
from models import normalIID_2parameters
np.random.seed( 87655678 );
##############################################################################
# Arrange the data structures
##############################################################################
sm = smc.smcSampler();
pmh = pmh_correlatedRVs.stcPMH();
##############################################################################
# Setup the system
##############################################################################
sys = normalIID_2parameters.ssm()
sys.par = np.zeros((sys.nPar,1))
sys.par[0] = 0.50;
sys.par[1] = 0.30;
sys.par[2] = 0.10;
sys.T = 10;
sys.xo = 0.0;
##############################################################################
# Generate data
##############################################################################
sys.generateData();
##############################################################################
# Setup the parameters
##############################################################################
th = normalIID_2parameters.ssm()
th.nParInference = 1;
th.copyData(sys);
##############################################################################
# Setup the IS algorithm
##############################################################################
sm.filter = sm.SISrv;
sm.sortParticles = False;
sm.nPart = 10;
sm.resampFactor = 2.0;
sm.genInitialState = True;
##############################################################################
# Setup the PMH algorithm
##############################################################################
pmh.nIter = 30000;
pmh.nBurnIn = 10000;
pmh.nProgressReport = 5000;
pmh.rvnSamples = 1 + sm.nPart;
pmh.writeOutProgressToFile = False;
# Set initial parameters
pmh.initPar = sys.par;
# Settings for th proposal
pmh.invHessian = 1.0;
pmh.stepSize = 0.1;
# Settings for u proposal
pmh.alpha = 0.00;
##############################################################################
# Run the correlated pmMH algorithm
##############################################################################
# Correlated random numbers
pmh.sigmaU = 0.50
pmh.runSampler( sm, sys, th );
muCPMMH = pmh.th
iactC = pmh.calcIACT()
# Uncorrelated random numbers (standard pmMH)
pmh.sigmaU = 1.0
pmh.runSampler( sm, sys, th );
muUPMMH = pmh.th
iactU = pmh.calcIACT()
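# Lower IACT indicates better mixing of the Markov chain; with only 10
# particles the correlated random numbers (sigmaU = 0.5) are expected to give
# a noticeably smaller IACT than the standard (uncorrelated) pmMH run below.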
print((iactC, iactU))
##############################################################################
# Plot the comparison
##############################################################################
plt.figure(1);
plt.subplot(2,3,1);
plt.plot(muCPMMH[:,0]);
plt.xlabel("iteration");
plt.ylabel("mu (cpmMH)");
plt.subplot(2,3,2);
plt.hist(muCPMMH[:,0],normed=True);
plt.xlabel("mu");
plt.ylabel("posterior estimate (cpmMH)");
plt.subplot(2,3,3);
plt.acorr(muCPMMH[:,0],maxlags=100);
plt.axis((0,100,0.92,1))
plt.xlabel("lag");
plt.ylabel("acf of mu (cpmMH)");
plt.figure(1);
plt.subplot(2,3,4);
plt.plot(muUPMMH[:,0]);
plt.xlabel("iteration");
plt.ylabel("mu (pmMH)");
plt.subplot(2,3,5);
plt.hist(muUPMMH[:,0],normed=True);
plt.xlabel("mu");
plt.ylabel("posterior estimate (pmMH)");
plt.subplot(2,3,6);
plt.acorr(muUPMMH[:,0],maxlags=100);
plt.axis((0,100,0.92,1))
plt.xlabel("iteration");
plt.ylabel("acf of mu (pmMH)");
##############################################################################
# End of file
############################################################################## | mit |
tseaver/google-cloud-python | scheduler/docs/conf.py | 2 | 11893 | # -*- coding: utf-8 -*-
#
# google-cloud-scheduler documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGELOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-scheduler"
copyright = u"2017, Google"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for Python",
"github_user": "googleapis",
"github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-scheduler-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-scheduler.tex",
u"google-cloud-scheduler Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-scheduler",
u"google-cloud-scheduler Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-scheduler",
u"google-cloud-scheduler Documentation",
author,
"google-cloud-scheduler",
"GAPIC library for the {metadata.shortName} service",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None),
"grpc": ("https://grpc.io/grpc/python/", None),
"requests": ("https://requests.kennethreitz.org/en/stable/", None),
"fastavro": ("https://fastavro.readthedocs.io/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| apache-2.0 |
NicovincX2/Python-3.5 | Analyse (mathématiques)/Analyse numérique/Équations différentielles numériques/Méthode des éléments finis/femmat2d.py | 1 | 6605 | # -*- coding: utf-8 -*-
"""
Class for generating 2D finite element matrices
Copyright (C) 2013 Greg von Winckel
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Created: Sun May 26 11:26:58 MDT 2013
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import os
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import spsolve
class femmat2d(object):
""" This object is created using a triangulation and produces
    the various matrices needed for a finite element calculation
    as well as specifying which grid points are on the interior
and which are on the boundary.
This version of the code supports piecewise linear variable "reaction"
coefficients and piecewise constant "diffusion" coefficients """
def __init__(self, triang):
self.tri = triang.triangles
self.x = triang.x
self.y = triang.y
self.nbr = triang.neighbors
self.nvert = len(self.x) # Number of grid points (vertices)
self.nel = len(self.tri) # Number of elements (triangles)
# Determine which points are on the boundary and which are in the
# interior
self.bpt = set(self.tri.flat[np.flatnonzero(triang.neighbors == -1)])
self.ipt = set(np.arange(self.nvert)).difference(self.bpt)
# Vertices of reference triangle
S = np.array(((-1, -1), (1, 0), (0, 1)))
# Coefficients of mapping from reference to target elements
# A1 = (a11,a21), A2 = (a12,a22)
        A1 = np.dot(self.x[self.tri], S)
        A2 = np.dot(self.y[self.tri], S)
self.Adet = A1[:, 0] * A2[:, 1] - A1[:, 1] * A2[:, 0]
# Coefficients of inverse mapping
Ap1 = np.c_[A2[:, 1], -A1[:, 1]] / self.Adet.reshape(self.nel, 1)
Ap2 = np.c_[-A2[:, 0], A1[:, 0]] / self.Adet.reshape(self.nel, 1)
# Basic matrix types on the reference element
self.M = np.array(((2, 1, 1), (1, 2, 1), (1, 1, 2))) / 24.0
self.Kxx = np.array(((1, -1, 0), (-1, 1, 0), (0, 0, 0))) / 2.0
self.Kxy = np.array(((1, 0, -1), (-1, 0, 1), (0, 0, 0))) / 2.0
self.Kyy = np.array(((1, 0, -1), (0, 0, 0), (-1, 0, 1))) / 2.0
self.Phi1 = np.array(((6, 2, 2), (2, 2, 1), (2, 1, 2))) / 120.0
self.Phi2 = np.array(((2, 2, 1), (2, 6, 2), (1, 2, 2))) / 120.0
self.Phi3 = np.array(((2, 1, 2), (1, 2, 2), (2, 2, 6))) / 120.0
# Compute all of the elemental stiffness and mass matrices
self.cxx = (Ap1[:, 0]**2 + Ap1[:, 1]**2) * self.Adet
self.cxy = (Ap1[:, 0] * Ap2[:, 0] + Ap1[:, 1] * Ap2[:, 1]) * self.Adet
self.cyy = (Ap2[:, 0]**2 + Ap2[:, 1]**2) * self.Adet
# Indices of the nonzero elements in the assembled matrices
        self.rows = np.array([np.outer(np.ones(3), self.tri[s])
                              for s in range(self.nel)]).flatten("C")
        self.cols = np.array([np.outer(self.tri[s], np.ones(3))
                              for s in range(self.nel)]).flatten("C")
def getInteriorPoints(self):
return list(self.ipt)
def getBoundaryPoints(self):
return list(self.bpt)
def assemble_Mtype(self, v=None, order=0):
""" Corresponds to the weak form (phi_j,v*phi_k) where
if order = 0, v has the same length as the number of elements
if order = 1, v has the same length as the number of vertices """
if v is None:
Mel = np.kron(self.Adet, self.M)
else:
            if order == 0:
Mel = np.kron(self.Adet * v, self.M)
else:
Mel = np.kron(v[self.tri[:, 0]] * self.Adet, self.Phi1) + \
np.kron(v[self.tri[:, 1]] * self.Adet, self.Phi2) + \
np.kron(v[self.tri[:, 2]] * self.Adet, self.Phi3)
M = csr_matrix((Mel.flatten("F"), (self.rows, self.cols)))
return M
def assemble_Ktype(self, v=None):
""" Corresponds to the weak form v*(grad[phi_j],\grad[phi_k]) where
v should have the same length as the number of elements """
if v is None:
Kel = np.kron(self.cxx, self.Kxx) + \
np.kron(self.cxy, self.Kxy) + \
np.kron(self.cxy, self.Kxy.T) + \
np.kron(self.cyy, self.Kyy)
else:
Kel = np.kron(v * self.cxx, self.Kxx) + \
np.kron(v * self.cxy, self.Kxy) + \
np.kron(v * self.cxy, self.Kxy.T) + \
np.kron(v * self.cyy, self.Kyy)
K = csr_matrix((Kel.flatten("F"), (self.rows, self.cols)))
return K
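# Hedged usage sketch (not part of the original module): a minimal standalone
# check of the assembly routines above on a small structured mesh. It verifies
# two identities of piecewise-linear elements: the entries of the mass matrix
# sum to half the summed Jacobian determinants (the mesh area for
# counterclockwise triangles), and the stiffness matrix annihilates constant
# functions. It is defined but never called, so it does not interfere with the
# demo under __main__ below.
def _femmat2d_sanity_check(n=5):
    import numpy as _np
    import matplotlib.tri as _tri
    q = _np.linspace(0.0, 1.0, n)
    xg, yg = (s.flatten() for s in _np.meshgrid(q, q))
    fem = femmat2d(_tri.Triangulation(xg, yg))
    M = fem.assemble_Mtype()
    K = fem.assemble_Ktype()
    ones = _np.ones(len(xg))
    # total of all mass-matrix entries equals sum(Adet)/2 by construction
    assert _np.isclose(ones.dot(M * ones), fem.Adet.sum() / 2.0)
    # the stiffness matrix applied to a constant vector is (numerically) zero
    assert _np.allclose(K * ones, 0.0)
    return M, K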
if __name__ == '__main__':
# number of grid points per dimension
n = 100
q = np.linspace(-2, 2, n)
# Create tensor product grid of x,y coordinates and column stack them as
# vectors
x, y = map(lambda s: s.flatten(), np.meshgrid(q, q))
# Create triangle mesh
t = tri.Triangulation(x, y)
fem = femmat2d(t)
# Compute centroids of elements
xc = np.mean(x[t.triangles], 1)
yc = np.mean(y[t.triangles], 1)
# Number of elements
Nel = t.triangles.shape[0]
# Number of vertices
Nvert = len(x)
# Variable "diffusion" coefficient (piecewise constant)
u = np.ones(Nel)
dex = np.where((xc > 0) & (yc > 0))
u[dex] = 2
# Variable "reaction" coefficient (piecesise linear)
v = np.exp(x - y)
f = 50 * np.sin(np.pi * (y + x))
M = fem.assemble_Mtype()
V = fem.assemble_Mtype(v, 1)
U = fem.assemble_Ktype(u)
A = U + V
F = M * f
i = fem.getInteriorPoints()
b = fem.getBoundaryPoints()
xb = x[b]
yb = y[b]
psi = np.zeros(Nvert)
    # Boundary forcing term
g = np.zeros(Nvert)
# g[b] = np.sin(np.pi*xb)
# Dirichlet problem
# psi[i] = spsolve(A[i,:][:,i],F[i]-A[i,:][:,b]*g[b])
# Neumann problem
psi = spsolve(A, F - g)
# Plot solution
plt.tricontourf(t, psi, 50)
plt.show()
os.system("pause")
| gpl-3.0 |
elijah513/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location estimates.
    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimates.
    det : float
        Log-determinant (as computed by ``fast_logdet``) of the robust
        covariance estimate.
    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.
    dist : array-like, shape (n_samples,)
        Mahalanobis-type squared distances of all observations, as used to
        select the support.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
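# Hedged usage sketch (not part of scikit-learn): a single sequence of C-steps
# on toy contaminated Gaussian data. Note that c_step, as implemented above,
# returns five values: location, covariance, log-determinant, support mask and
# squared Mahalanobis-type distances.
def _c_step_demo(seed=42):
    rng = np.random.RandomState(seed)
    X = rng.randn(100, 2)
    X[:10] += 10.0                  # plant 10 obvious outliers
    n_support = 55                  # a bit more than half of the samples
    loc, cov, det, support, dist = c_step(X, n_support, random_state=rng)
    # the support should concentrate on the clean points
    return loc, cov, support.sum()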
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
    best_supports : array-like, shape (select, n_samples)
        The `select` best supports found in the data set (`X`).
    best_ds : array-like, shape (select, n_samples)
        The Mahalanobis-type squared distances associated with the `select`
        best supports.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
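# Hedged usage sketch (not part of scikit-learn): draw a handful of random
# initial supports and keep the two purest candidates found by the C-steps.
def _select_candidates_demo(seed=0):
    rng = np.random.RandomState(seed)
    X = np.r_[rng.randn(80, 3), 5.0 + rng.randn(20, 3)]
    locs, covs, supports, dists = select_candidates(
        X, n_support=55, n_trials=6, select=2, n_iter=5, random_state=rng)
    return locs.shape, covs.shape, supports.shape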
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.
    dist : array-like, shape (n_samples,)
        Mahalanobis-type squared distances of the observations from the raw
        robust location estimate.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
        n_best_tot = n_subsets * n_best_sub
        try:
            all_best_locations = np.zeros((n_best_tot, n_features))
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
        except MemoryError:
            # The above is too big. Let's retry with something much smaller
            # (and less optimal): keep only a couple of candidates per subset.
            n_best_sub = 2
            n_best_tot = n_subsets * n_best_sub
            all_best_locations = np.zeros((n_best_tot, n_features))
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
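# Hedged usage sketch (not part of scikit-learn): raw FastMCD estimates on a
# contaminated 2-D Gaussian sample. Only the raw (uncorrected, unreweighted)
# estimates come back here; MinCovDet below applies the correction and
# re-weighting steps on top of them.
def _fast_mcd_demo(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(300, 2)
    X[:30] += 8.0                   # 10% contamination
    location, covariance, support, dist = fast_mcd(X, random_state=rng)
    return location, covariance, support.sum()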
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is almost, but not exactly,
        zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
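# Hedged usage sketch (not part of scikit-learn): MinCovDet follows the usual
# fit/attribute pattern; after fitting, the robust estimates are available as
# location_, covariance_ and support_.
def _min_cov_det_demo(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.multivariate_normal(mean=[0.0, 0.0],
                                cov=[[1.0, 0.3], [0.3, 1.0]], size=500)
    X[:50] += 6.0                   # 10% contamination
    mcd = MinCovDet(random_state=seed).fit(X)
    return mcd.location_, mcd.covariance_, mcd.support_.sum()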
| bsd-3-clause |
CtrlC-Root/cse5526 | p1all.py | 1 | 1254 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import matplotlib.pyplot as plot
# graph for part 1
rates = [0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50]
workers = {}
for rate in rates:
    workers[rate] = subprocess.Popen(
        ['./p1sim.py', '--rate', str(rate), '--momentum', '0.0'],
        stdout=subprocess.PIPE, universal_newlines=True)
epochs = {}
for rate, worker in workers.items():
worker.wait()
stdout, stderr = worker.communicate()
epochs[rate] = int(stdout.split('\n')[-2])
one, = plot.plot(
epochs.keys(),
epochs.values(),
'-',
linewidth=2,
label="Momentum 0.0")
# graph for part 2
workers = {}
for rate in rates:
    workers[rate] = subprocess.Popen(
        ['./p1sim.py', '--rate', str(rate), '--momentum', '0.9'],
        stdout=subprocess.PIPE, universal_newlines=True)
epochs = {}
for rate, worker in workers.items():
worker.wait()
stdout, stderr = worker.communicate()
epochs[rate] = int(stdout.split('\n')[-2])
two, = plot.plot(
epochs.keys(),
epochs.values(),
'--',
linewidth=2,
label="Momentum 0.9")
# generate the plot and save it
plot.xlabel("Learning Rate")
plot.ylabel("Epochs")
plot.title("Network Training")
plot.legend()
plot.grid(True)
plot.savefig('project1.png')
| mit |
soazig/project-epsilon | code/utils/outlier_script.py | 1 | 8215 | """ Script to run diagnostic analysis on FMRI run
The FMRI 'run' is a continuous collection of one or more 3D volumes.
A run is usually stored as a 4D NIfTI image.
In this case we are analyzing the 4D NIfTI image: "ds114_sub009_t2r1.nii"
Fill in the code necessary under the comments below.
As you are debugging, we suggest you run this script from within IPython, with
::
run diagnosis_script.py
Remember, in IPython, that you will need to "reload" any modules that have
changed. So, if you have imported your module like this:
import diagnostics
Then you will need to run this before rerunning your script, to get the latest
version of the code.
reload(diagnostics)
Before you submit your homework, don't forget to check this script also runs
correctly from the terminal, with::
python diagnosis_script.py
"""
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
import numpy.linalg as npl
from itertools import product
import diagnostics
try:  # Python 3: reload lives in importlib
    from importlib import reload
except ImportError:  # Python 2: reload is a builtin
    pass
reload(diagnostics)
"""
* Load the image as an image object
* Load the image data from the image
* Drop the first four volumes, as we know these are outliers
"""
img = nib.load('ds114_sub009_t2r1.nii')
data = img.get_data()
data = data[...,4:]
"""
Use your vol_std function to get the volume standard deviation values for the
remaining 169 volumes.
Write these 169 values out to a text file.
*IMPORTANT* - this text file MUST be called 'vol_std_values.txt'
"""
volstd = diagnostics.vol_std(data)
fobj = open('vol_std_values.txt', 'wt')
for i in volstd:
fobj.write(str(i) + '\n')
fobj.close()
"""
Use the iqr_outlier detection routine to get indices of outlier volumes.
Write these indices out to a text file.
*IMPORTANT* - this text file MUST be called 'vol_std_outliers.txt'
"""
outliers_index, thres = diagnostics.iqr_outliers(volstd)
fobj = open('vol_std_outliers.txt', 'wt')
for i in outliers_index:
fobj.write(str(i) + '\n')
fobj.close()
"""
Plot all these on the same plot:
* The volume standard deviation values;
* The outlier points from the std values, marked on the plot with an 'o'
marker;
* A horizontal dashed line at the lower IQR threshold;
* A horizontal dashed line at the higher IQR threshold;
Extra points for a good legend to the plot.
Save the figure to the current directory as ``vol_std.png``.
IMPORTANT - use exactly this name.
"""
plt.plot(volstd,'r', label='volume sd values')
for i in outliers_index:
plt.plot(i,volstd[i],'o',color='b')
plt.axhline(y=thres[0], color='g',ls='dashed',label='low threshold')
plt.axhline(y=thres[1], color='black',ls='dashed',label='high threshold')
plt.legend(loc=4)
plt.savefig('vol_std.png')
plt.clf()
""" Next calculate and plot the RMS difference values
* Calculate the RMS difference values for the image data;
* Use the ``iqr_outlier`` function to return indices of possible outliers in
this RMS difference vector;
On the same plot, plot the following:
* The RMS vector;
* The identified outlier points marked with an `o` marker;
* A horizontal dashed line at the lower IQR threshold;
* A horizontal dashed line at the higher IQR threshold;
IMPORTANT - save this plot as ``vol_rms_outliers.png``
"""
rmsd = diagnostics.vol_rms_diff(data)
outliers_rms_index, thres_rms = diagnostics.iqr_outliers(rmsd)
plt.plot(rmsd,'r', label='rms differences values')
for i in outliers_rms_index:
plt.plot(i,rmsd[i],'o', color='blue')
plt.axhline(y=thres_rms[0], color='g',ls='dashed',label='low threshold')
plt.axhline(y=thres_rms[1], color='black',ls='dashed',label='high threshold')
plt.legend(loc=1)
plt.xlabel('volume')
plt.ylabel('rms difference')
plt.savefig('vol_rms_outliers.png')
plt.clf()
""" Use the ``extend_diff_outliers`` to label outliers
Use ``extend_diff_outliers`` on the output from ``iqr_outliers`` on the RMS
difference values. This gives you indices for labeled outliers.
On the same plot, plot the following:
* The RMS vector with a 0 appended to make it have length the same as the
number of volumes in the image data array;
* The identified outliers shown with an `o` marker;
* A horizontal dashed line at the lower IQR threshold;
* A horizontal dashed line at the higher IQR threshold;
IMPORTANT - save this plot as ``extended_vol_rms_outliers.png``
"""
outliers_rms_label = diagnostics.extend_diff_outliers(outliers_rms_index)
rmsd_append = np.append(rmsd,0)
plt.plot(rmsd_append,'r', label='rms extended differences values')
for i in outliers_rms_label:
plt.plot(i,rmsd_append[i],'o', color='blue')
plt.axhline(y=thres_rms[0], color='g',ls='dashed',label='low threshold')
plt.axhline(y=thres_rms[1], color='black',ls='dashed',label='high threshold')
plt.legend(loc=3)
plt.xlabel('volume')
plt.ylabel('rms difference')
plt.savefig('extended_vol_rms_outliers.png')
plt.clf()
""" Write the extended outlier indices to a text file.
IMPORTANT: name the text file extended_vol_rms_outliers.txt
"""
fobj = open('extended_vol_rms_outliers.txt', 'wt')
for i in outliers_rms_label:
fobj.write(str(i) + '\n')
fobj.close()
""" Show that the residuals drop when removing the outliers
Create a design matrix for the image data with the convolved neural regressor
and an intercept column (column of 1s).
Load the convolved neural time-course from ``ds114_sub009_t2r1_conv.txt``.
Fit this design to estimate the (2) betas for each voxel.
Subtract the fitted data from the data to form the residuals.
Calculate the mean residual sum of squares (MRSS) at each voxel (the sum of
squared residuals divided by the residual degrees of freedom).
Finally, take the mean of the MRSS values across voxels. Print this value.
"""
convolved = np.loadtxt('ds114_sub009_t2r1_conv.txt')
convolved = convolved[4:]
N = len(convolved)
X = np.ones((N, 2))
X[:, 0] = convolved
Xp = npl.pinv(X)
Y = np.reshape(data, (-1, data.shape[-1]))
beta=np.dot(Xp,Y.T)
betas_4d = np.reshape(beta.T, img.shape[:-1] + (-1,))
Y_hat=np.dot(betas_4d, X.T)
df=X.shape[0]-npl.matrix_rank(X)
diff=data-Y_hat
residual=np.sum(diff**2)/df
residual=residual/np.product(data.shape[:-1])
print(residual)
"""
lst=[]
for i in np.shape(data[...,-1]):
lst.append(range(i))
data_index=list(product(*lst))
beta=[]
for i in data_index:
beta.append(Xp.dot(data[i]))
fitted=[]
for i in beta:
fitted.append(X.dot(i))
errors=[]
for i in range(data_index):
errors.append(data[data_index[i]]-fitted[i])
residual=[]
for i in errors:
residual.append(np.sum(i ** 2)/(X.shape[0] - npl.matrix_rank(X)))
print(mean(residual))
"""
"""
Next do the exactly the same, except removing the extended RMS difference
outlier volumes from the data and the corresponding rows for the design.
Print the mean of the MRSS values across voxels. Is this value smaller?
"""
convolved_extend = np.delete(convolved, outliers_rms_label, 0)
N = len(convolved_extend)
X = np.ones((N, 2))
X[:, 0] = convolved_extend
Xp = npl.pinv(X)
data_extend = np.delete(data, outliers_rms_label,3)
Y = np.reshape(data_extend, (-1, data_extend.shape[-1]))
beta=np.dot(Xp,Y.T)
betas_4d = np.reshape(beta.T, img.shape[:-1] + (-1,))
Y_hat=np.dot(betas_4d, X.T)
df=X.shape[0]-npl.matrix_rank(X)
diff=data_extend-Y_hat
residual_extend=np.sum(diff**2)/df
residual_extend=residual_extend/np.product(data.shape[:-1])
print(residual_extend)
"""
Y_hat=np.dot(X,beta)
Y_hat=Y_hat.T
Y_hat=Y_hat.reshape(np.shape(data_extend)[0],np.shape(data_extend)[1],np.shape(data_extend)[2],np.shape(data_extend)[3])
df=X.shape[0]-npl.matrix_rank(X)
diff=data_extend-Y_hat
residual_extend=np.sum(diff**2)/df
residual_extend=residual_extend/np.shape(data_extend)[3]
print(residual_extend)
"""
""" Now save these two mean MRSS values to a text file
IMPORTANT: save to ``mean_mrss_vals.txt``
"""
fobj = open('mean_mrss_vals.txt', 'wt')
fobj.write("residual = " + str(residual) + '\n')
fobj.write("residual_extend = " + str(residual_extend) + '\n')
fobj.close()
# Some final checks that you wrote the files with their correct names
from os.path import exists
assert exists('vol_std_values.txt')
assert exists('vol_std_outliers.txt')
assert exists('vol_std.png')
assert exists('vol_rms_outliers.png')
assert exists('extended_vol_rms_outliers.png')
assert exists('extended_vol_rms_outliers.txt')
assert exists('mean_mrss_vals.txt')
| bsd-3-clause |
ggandhi27/Global-Terror-Analysis | client1/scripts/mapper1.py | 1 | 2115 | #code that print top 30 frequently occurred atacks in countries and display year versus number of attacks graph
#!/usr/bin/python2
import operator
import matplotlib.pyplot as plt
import numpy as np
import sys
#f=open('globalterrorismdb_0616dist.csv','a+')
countrylist=[]
for i in sys.stdin:
countrylist.append(i.split(',')[6])
attackspercountry={}
for i in countrylist:
if i in attackspercountry:
attackspercountry[i]+=1
else:
attackspercountry[i]=1
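#hedged aside (not in the original script): the hand-rolled frequency count
#above is equivalent to collections.Counter, which is available in Python 2.7;
#attacks_counter.most_common(30) would give the top-30 (country, count) pairs
#directly. attacks_counter is only an illustration and is not used below.
from collections import Counter
attacks_counter = Counter(countrylist)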
country=[]
occur1=[]
sorted_x=sorted(attackspercountry.items(),key=operator.itemgetter(1))
#print sorted_x
for i in sorted_x:
country.append(i[0])
occur1.append(i[1])
#print country
#print occur1
c=-1
reverse=[]
while c>=-30:
reverse.append(occur1[c])
c-=1
cc=-1
reversee=[]
while cc>=-30:
reversee.append(country[cc])
cc-=1
#print reverse
#print reversee
ff=open('top30','a+')
ff.write("Top 30 most frequently occured terrorist attacks in countries:")
for i,j in zip(reverse,reversee):
ff.write("%d terrorist attacks in %s\n"%(i,j))
occurbar=[]
c=-1
while c>-8:
occurbar.append(occur1[c])
c-=1
a=[]
for i in occurbar:
print type(i)
a.append(str(i))
print a
a=','.join(a)
print a
#print occurbar
cc=-1
countrybar=[]
while cc>-8:
countrybar.append(country[cc])
cc-=1
#countrybar=','.join(countrybar)
b=[]
for i in countrybar:
print type(i)
b.append(str(i))
print b
b=','.join(b)
print b
final=a+":"+b
print final
'''
y_pos=np.arange(len(countrybar))
plt.bar(y_pos,occurbar,align='center',alpha=0.5)
plt.xticks(y_pos,countrybar)
plt.ylabel('number of attacks')
plt.title('Number of attacks in countries')
plt.show()
yearlist=[]
z=1
for i in sys.stdin:
if z==1:
z+=1
pass
else:
yearlist.append(i.split(',')[1])
attacksperyear={}
for i in yearlist:
if i in attacksperyear:
attacksperyear[i]+=1
else:
attacksperyear[i]=1
#print attacksperyear
year=[]
occur=[]
sorted_xx=sorted(attacksperyear.items(),key=operator.itemgetter(0))
#print sorted_xx
for i in sorted_xx:
year.append(i[0])
occur.append(i[1])
#print year
#print occur
plt.plot(year,occur)
plt.savefig('year.png')
plt.show()
'''
| apache-2.0 |
PrashntS/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each value
possible value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
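# Hedged illustration (not in the original example): for a single pair of
# independent random labelings, the adjusted Rand index hovers around 0 while
# the unadjusted V-measure is noticeably positive.
def _single_pair_demo(n_samples=200, n_clusters=20, seed=0):
    rng = np.random.RandomState(seed)
    labels_a = rng.randint(0, n_clusters, size=n_samples)
    labels_b = rng.randint(0, n_clusters, size=n_samples)
    return (metrics.adjusted_rand_score(labels_a, labels_b),
            metrics.v_measure_score(labels_a, labels_b))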
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
yobibyte/DeepFried2 | examples/Kaggle-Otto/run.py | 3 | 2055 | import DeepFried2 as df
import numpy as np
import pandas as pd
from os.path import dirname, join as pjoin
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import train_test_split
from train import train
from test import validate
def load_train_data():
train_data = pd.read_csv(pjoin(dirname(__file__), 'data', 'train.csv'))
labels = train_data.target.values
labels = LabelEncoder().fit_transform(labels)
train_data = train_data.drop('id', axis=1)
train_data = train_data.drop('target', axis=1)
return train_data.as_matrix(), labels
def nnet():
model = df.Sequential()
model.add(df.AddConstant(1.0))
model.add(df.Log())
model.add(df.BatchNormalization(93))
model.add(df.Dropout(0.1))
model.add(df.Linear(93, 512))
model.add(df.BatchNormalization(512))
model.add(df.ReLU())
model.add(df.Dropout(0.5))
model.add(df.Linear(512, 512))
model.add(df.BatchNormalization(512))
model.add(df.ReLU())
model.add(df.Dropout(0.5))
model.add(df.Linear(512, 512))
model.add(df.BatchNormalization(512))
model.add(df.ReLU())
model.add(df.Dropout(0.5))
model.add(df.Linear(512, 9))
model.add(df.SoftMax())
return model
if __name__ == "__main__":
if __package__ is None: # PEP366
__package__ = "DeepFried2.examples.KaggleOtto"
train_data_x, train_data_y = load_train_data()
train_data_x, valid_data_x, train_data_y, valid_data_y = train_test_split(train_data_x, train_data_y, train_size=0.85)
model = nnet()
criterion = df.ClassNLLCriterion()
optimiser = df.Momentum(lr=0.01, momentum=0.9)
for epoch in range(1, 1001):
model.training()
if epoch % 100 == 0:
optimiser.hyperparams['lr'] /= 10
train(train_data_x, train_data_y, model, optimiser, criterion, epoch, 100, 'train')
train(train_data_x, train_data_y, model, optimiser, criterion, epoch, 100, 'stats')
model.evaluate()
validate(valid_data_x, valid_data_y, model, epoch, 100)
| mit |
janvanrijn/openml-pimp | examples/plot/kde.py | 1 | 7128 | import arff
import argparse
import collections
import json
import matplotlib
import numpy as np
import openmlpimp
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from scipy.stats import rv_discrete
from ConfigSpace.hyperparameters import CategoricalHyperparameter, NumericalHyperparameter, UniformIntegerHyperparameter
def parse_args():
parser = argparse.ArgumentParser(description='Plot PDF diagrams according to KernelDensity Estimator')
all_classifiers = ['adaboost', 'bernoulli_nb', 'decision_tree', 'extra_trees', 'gaussian_nb', 'gradient_boosting',
'k_nearest_neighbors', 'lda', 'liblinear_svc', 'libsvm_svc', 'multinomial_nb', 'passive_aggressive',
'qda', 'random_forest', 'sgd']
all_classifiers = ['adaboost', 'random_forest']
parser.add_argument('--flow_id', type=int, default=7707, help='the OpenML flow id')
    parser.add_argument('--classifier', type=str, default='libsvm_svc', help='the classifier name')
parser.add_argument('--study_id', type=str, default='OpenML100', help='the OpenML study id')
parser.add_argument('--fixed_parameters', type=json.loads, default={'kernel': 'sigmoid'}, help='Will only use configurations that have these parameters fixed')
parser.add_argument('--cache_directory', type=str, default=os.path.expanduser('~') + '/experiments/cache_kde', help="Directory containing cache files")
parser.add_argument('--output_directory', type=str, default=os.path.expanduser('~') + '/experiments/pdf', help="Directory to save the result files")
parser.add_argument('--result_directory', type=str, default=os.path.expanduser('~') + '/nemo/experiments/priorbased_experiments', help="Adds samples obtained from a result directory")
args = parser.parse_args()
return args
def obtain_sampled_parameters(directory):
import glob
files = glob.glob(directory + '/*/*.arff')
values = collections.defaultdict(list)
for file in files:
with open(file, 'r') as fp:
arff_file = arff.load(fp)
for idx, attribute in enumerate(arff_file['attributes']):
attribute_name = attribute[0]
if attribute_name.startswith('parameter_'):
canonical_name = attribute_name.split('__')[-1]
values[canonical_name].extend([arff_file['data'][x][idx] for x in range(len(arff_file['data']))])
return values
def plot_categorical(X, output_dir, parameter_name):
try:
os.makedirs(output_dir)
except FileExistsError:
pass
X_prime = collections.OrderedDict()
for value in X:
if value not in X_prime:
X_prime[value] = 0
X_prime[value] += (1.0 / len(X))
distrib = rv_discrete(values=(list(range(len(X_prime))), list(X_prime.values())))
fig, ax = plt.subplots()
# TODO: resampled from dist, but will do.
ax.hist(distrib.rvs(size=100), range=(0, len(X_prime)))
ax.legend(loc='upper left')
plt.savefig(output_dir + parameter_name + '.png', bbox_inches='tight')
plt.close()
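# Hedged alternative (not in the original script): rather than re-sampling from
# the discrete distribution and histogramming (see the TODO above), the
# empirical probability mass function can be drawn directly as a bar chart.
def plot_categorical_pmf(X, output_dir, parameter_name):
    os.makedirs(output_dir, exist_ok=True)
    counts = collections.Counter(X)
    labels = list(counts.keys())
    probs = [counts[k] / float(len(X)) for k in labels]
    fig, ax = plt.subplots()
    ax.bar(range(len(labels)), probs)
    ax.set_xticks(range(len(labels)))
    ax.set_xticklabels(labels, rotation=45, ha='right')
    ax.set_ylabel('empirical probability')
    plt.savefig(output_dir + parameter_name + '_pmf.png', bbox_inches='tight')
    plt.close(fig)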
def plot_numeric(hyperparameter, data, histo_keys, output_dir, parameter_name, resolution=100):
try:
os.makedirs(output_dir)
except FileExistsError:
pass
factor = 1.0
colors = ['r', 'b', 'g', 'c', 'm', 'y', 'k', 'w']
min = np.power(hyperparameter.lower, factor)
max = np.power(hyperparameter.upper, factor)
if max < hyperparameter.upper:
max = hyperparameter.upper * factor
fig, axes = plt.subplots(1, figsize=(8, 6))
for index, name in enumerate(data):
if name in histo_keys:
pass
# axes[0].hist(data[name], resolution, normed=True, facecolor=colors[index], alpha=0.75)
else:
if hyperparameter.log:
                # np.logspace expects base-10 exponents (its base defaults to 10)
                X_values_plot = np.logspace(np.log10(min), np.log10(max), resolution)
axes.set_xscale("log")
else:
X_values_plot = np.linspace(min, max, resolution)
if isinstance(hyperparameter, UniformIntegerHyperparameter):
axes.xaxis.set_major_locator(MaxNLocator(integer=True))
# plot pdfs
distribution = openmlpimp.utils.priors.gaussian_kde_wrapper(hyperparameter, hyperparameter.name, data[name])
axes.plot(X_values_plot, distribution.pdf(X_values_plot), colors[index]+'-', lw=5, alpha=0.6, label=name.replace('_', ' '))
# plot cdfs
# sorted = np.sort(np.array(data[name]))
# yvals = np.arange(1, len(sorted) + 1) / float(len(sorted))
# axes[1].step(sorted, yvals, linewidth=1, c=colors[index], label=name)
# add original data points
#if 'gaussian_kde' in data:
# axes.plot(data['gaussian_kde'], -0.005 - 0.01 * np.random.random(len(data['gaussian_kde'])), '+k')
# axis and labels
#axes[1].legend(loc='upper left')
axes.set_xlim(min, max)
# plot
plt.savefig(output_dir + parameter_name + '.pdf', bbox_inches='tight')
plt.close()
if __name__ == '__main__':
tick_fontsize = 18
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['xtick.labelsize'] = tick_fontsize
matplotlib.rcParams['ytick.labelsize'] = tick_fontsize
args = parse_args()
folder_suffix = openmlpimp.utils.fixed_parameters_to_suffix(args.fixed_parameters)
output_dir = args.output_directory + '/hyperband_5/' + str(args.flow_id) + '/' + folder_suffix
cache_dir = args.cache_directory + '/hyperband_5/' + str(args.flow_id) + '/' + folder_suffix
results_dir = args.result_directory + '/hyperband_5/' + str(args.flow_id) + '/' + folder_suffix
configspace = openmlpimp.utils.get_config_space_casualnames(args.classifier, args.fixed_parameters)
obtained_results = {}
if args.result_directory is not None:
for strategy in os.listdir(results_dir):
res = obtain_sampled_parameters(os.path.join(results_dir, strategy))
if len(res):
obtained_results[strategy] = res
param_priors = openmlpimp.utils.obtain_priors(cache_dir, args.study_id, args.flow_id, configspace, args.fixed_parameters, holdout=None, bestN=10)
for param_name, priors in param_priors.items():
if all(x == priors[0] for x in priors):
continue
current_parameter = configspace.get_hyperparameter(param_name)
histo_keys = set()
if isinstance(current_parameter, NumericalHyperparameter):
data = collections.OrderedDict({'gaussian_kde': priors})
for strategy in obtained_results:
strategy_name = openmlpimp.utils.plot._determine_name(strategy)
data[strategy_name] = np.array(obtained_results[strategy][param_name], dtype=np.float64)
histo_keys.add(strategy_name)
plot_numeric(current_parameter, data, histo_keys, output_dir + '/', param_name)
elif isinstance(current_parameter, CategoricalHyperparameter):
plot_categorical(priors, output_dir + '/', param_name)
| bsd-3-clause |
JohnGBaker/flare | python/flare.py | 1 | 35280 | import os
import math
import numpy as np
import subprocess
import re
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import astropy.units as units
from astropy.cosmology import Planck15 as cosmo,z_at_value
from matplotlib.backends.backend_pdf import PdfPages
import threading
import time
import sys
import traceback
import fmerge
flare_dir=os.path.dirname(os.path.realpath(__file__))[:-7]
multithreaded=True
threadLock=threading.Lock()
ireport=9
FisherRunFailCount=0
noRun=False
all_params_file=False
ROM_DATA_PATH="ROMdata/q1-12_Mfmin_0.0003940393857519091"
extra_flags=""
LISAvariant="LISA2017"
deltatobs=5.0
only22=False
onlyInspiral=False
postInspiral=False
SampleReIm=False
FisherReIm=True
ReIm_npts=32768
linearSNRplot=False
def set_flare_flags(snr,params):
flags=""
#Waveform model
#flags += " --tagextpn 0" #Don't extend waveforms at low freq to allow lower masses
if(SampleReIm): flags+=" --tagint 1 --nbptsoverlap " + str(ReIm_npts) #gridded quadrature
#flags+=" --deltatobs 5.0" #duration in years of LISA observation
flags+=" --deltatobs "+str(deltatobs) #duration in years of LISA observation
#flags+=" --minf 1e-4" #minimun frequency included in analysis
#flags+=" --minf 3e-6" #minimun frequency included in analysis
minf=3e-6;
maxf=0.5;
if(onlyInspiral):
LSOf=fmerge.Mf(params[0],params[1]) #maximum frequency at LSO
if(LSOf<maxf):maxf=LSOf;
if(postInspiral):
LSOf=fmerge.Mf(params[0],params[1]) #maximum frequency at LSO
if(LSOf>minf):minf=LSOf;
flags+=" --maxf "+str(maxf) #maximum frequency included in analysis
flags+=" --minf "+str(minf) #minimum frequency included in analysis
#flags+=" --maxf 0.15" #maximum frequency included in analysis
if(only22):
flags+=" --nbmodeinj 1 --nbmodetemp 1" #for no higher modes in injection and template
else:
flags+=" --nbmodeinj 5 --nbmodetemp 5" #for no higher modes in injection and template
#set parameter vals
m1 = params[0]
m2 = params[1]
tRef = params[2]
phiRef = params[3]
if(snr>0):
dist = 2e4
else:
dist = params[4]
lam = params[5]
beta = params[6]
inc = params[7]
pol = params[8]
#flags+=" --comp-min 1e5 --comp-max 1e8" #min/max for component mass prior ranges
#flags+=" --mtot-min 1e4 --mtot-max 1e10 " #additional prior limits on Mtot and q
flags+=" --comp-min "+str(m2/3.)+" --comp-max "+str(m1*3.) #min/max factor of 3 for component mass prior ranges
flags+=" --mtot-min "+str((m1+m2)/10.)+" --mtot-max "+str((m1+m2)*10.) #min/max factor of 10 for total mass
if(snr>0):
flags+=" --snr "+str(snr)+" --rescale-distprior" #fixing SNR (rescales distance)
flags+=" --logflat-massprior" #assume prior uniform in log of masses, rather than uniform for mass."
#flags+=" --mtot-min 8e5 --mtot-max 2e8 --q-max 11.98" #additional prior limits on Mtot and q
#flags+=" --mtot-min 1e4 --mtot-max 1e10 --q-max 11.98" #additional prior limits on Mtot and q
flags+=" --q-max 11.98" #additional prior limits on q
#flags+=" --dist-min 5000. --dist-max 200e4 --distance 1e5" #prior range for distances should verify range based on distances (in Mpc).
#flags+=" --dist-min 1000. --dist-max 4e5" #prior range for distances approx 0.2<z<33
flags+=" --dist-min 500. --dist-max 1.3e6" #prior range for distances approx 0.1<z<100
    flags+=" --flat-distprior" #by default use a flat prior on the distance rather than one scaling as R^2
    flags+=" --variant "+LISAvariant #select the LISA instrument variant (e.g. LISA2017)
flags+=extra_flags #Default is none, but can add at runtime...
#set parameter flags
flags += " --phiRef "+str(phiRef)
flags += " --m1 "+str(m1)
flags += " --m2 "+str(m2)
flags += " --tRef "+str(tRef)
flags += " --distance "+str(dist)
flags += " --lambda "+str(lam)
flags += " --beta "+str(beta)
flags += " --inclination "+str(inc)
flags += " --polarization "+str(pol)
#print "flags : \n" + flags
return flags
def set_mcmc_flags(outroot,ptN):
flags = ""
#MCMC basics
flags += "--noFisher --rng_seed="+str(np.random.rand())
flags += " --outroot "+str(outroot)
#flags += " --nskip=40 --info_every=10000" #frequency of sampling/reporting
flags += " --save_every=40 --info_every=10000" #frequency of sampling/reporting
flags += " --prop=7 --de_ni=50 --gauss_1d_frac=0.5 --de_reduce_gamma=4" #differential evolution proposal distribution with Gaussian draws 1/2 of the time
#Parallel Tempering setup
flags += " --pt_stop_evid_err=0.05 --chain_ess_stop=2000 --pt_Tmax=1e9" #parallel tempering basics
if(ptN>0):
flags += " --pt="+str(ptN) #else default is 20
flags += " --pt_swap_rate=0.10" #rate of temp swaps (or default 0.01)
flags += " --pt_evolve_rate=0.01" #rate at which temps are allowed to evolve
#flags += " --pt_reboot_rate=0.0001 --pt_reboot_every=10000 --pt_reboot_grace=50000" #Somewhat hacky trick to avoid chains getting stuck. Not sure whether we need this.
#stopping criteria
flags += " --nsteps=100000000" #100 million steps is probably more than we can do
flags += " --pt_stop_evid_err=0.05" #may terminate earlier based on evidence criterion
return flags
def set_bambi_flags(outroot,nlive=4000, tol=1.0, multimodal=True):
flags = " --nlive "+str(nlive)+" --tol "+str(tol)
if(multimodal):flags += " --mmodal --nclspar 2 --maxcls 20 --ztol -60"
flags += " --seed"
flags += " --outroot "+outroot
return flags
def par_name(i):
return ["m1","m2","t0","D","phi0","inc","lambda","beta","pol","sky","orient","Mvol"][i]
def draw_params(Mtot,q):
#we suppose fixed Mtot,q,SNR and draw the other params
m1 = Mtot*q/(1.0+q)
m2 = Mtot/(1.0+q)
tRef = np.random.randn()*1e5
phiRef = 2*math.pi*np.random.rand()
dist = 100*10**(np.random.rand()*math.log10(400))
lam = np.random.rand()*2.0*math.pi
beta = math.acos(np.random.rand()*2.0-1)-math.pi/2.0
inc = math.acos(np.random.rand()*2.0-1)
pol = np.random.rand()*math.pi
params = [m1,m2,tRef,phiRef,dist,lam,beta,inc,pol]
return params
def perform_run(name,Mtot,q,snr):
if(BAMBI):
cmd = flare_dir+"/LISAinference/LISAinference"
        flags = set_bambi_flags(name)
else:
cmd = flare_dir+"/LISAinference/LISAinference_ptmcmc"
        flags = set_mcmc_flags(name,60)
params=draw_params(Mtot,q)
flags+=set_flare_flags(snr,params)
    subprocess.call(cmd+" "+flags,shell=True)
def SNRrun(Mtot,q,snr,name="dummy"):
cmd = flare_dir+"/LISAinference/LISAinference_ptmcmc"
flags = " --nsteps=0 --noFisher"
params=draw_params(Mtot,q)
flags+=set_flare_flags(snr,params)
flags += " --rng_seed="+str(np.random.rand())+" "
flags += " --outroot "+str(name)+" "
cmd += " "+flags+">"+name+".out"
setenv=""
#setenv = "export ROM_DATA_PATH=/Users/jgbaker/Projects/GWDA/LISA-type-response/flare/ROMdata/q1-12_Mfmin_0.0003940393857519091"
#setenv="export ROM_DATA_PATH=/discover/nobackup/jgbaker/GW-DA/flare/ROMdata/q1-12_Mfmin_0.0003940393857519091"
setenv="export ROM_DATA_PATH="+flare_dir+"/"+ROM_DATA_PATH
print( "Executing '"+cmd+"'")
code=subprocess.call(setenv+";"+cmd,shell=True)
print( "Run completed with code(",code,")")
print( "CWD=",os.getcwd())
with open(name+"params.txt",'r') as file:
lines=file.read()
#print lines
dist=re.search("dist_resc:(.*)", lines).group(1)
print( "distance =",dist)
return float(dist)
def tSNRrun(Mtot,q,snr,name,data):
cmd = flare_dir+"/LISAinference/LISAinference_ptmcmc"
flags = " --nsteps=0 --noFisher"
params=draw_params(Mtot,q)
flags+=set_flare_flags(snr,params)
flags += " --rng_seed="+str(np.random.rand())+" "
flags += " --outroot "+str(name)+" "
cmd += " "+flags+">"+name+".out"
setenv=""
#setenv = "export ROM_DATA_PATH=/Users/jgbaker/Projects/GWDA/LISA-type-response/flare/ROMdata/q1-12_Mfmin_0.0003940393857519091"
setenv="export ROM_DATA_PATH="+flare_dir+"/"+ROM_DATA_PATH
print( "Executing '"+cmd+"'")
code=subprocess.call(setenv+";"+cmd,shell=True)
print( "Run completed with code(",code,")")
with open(name+"params.txt",'r') as file:
lines=file.read()
#print lines
dist=re.search("dist_resc:(.*)", lines).group(1)
print( "distance =",dist)
if(not math.isnan(float(dist))):
data.append(float(dist))
else:
print( "NAN_DIST with command:",cmd)
return
def threadedSNRrun(Mtot,q,snr,label,Nruns,Nthreads,data):
irun=0
if(FisherRunFailCount==0):subprocess.call("echo 'Output from runs generating exceptions:' > fisher_fails.out",shell=True)
while(irun<Nruns):
        if(Nthreads<Nruns-irun):count=Nthreads
else: count=Nruns-irun
print( "irun=",irun,"Count=",count,"Nruns=",Nruns,"Nthreads=",Nthreads)
threads=[];
ith=0
for t in range(count):
ith+=1
thread = threading.Thread(target=tSNRrun, args=(Mtot,q,snr,label+str(ith),data))
thread.start()
threads.append(thread)
for thread in threads:
thread.join() #this blocks further execution until the thread has returned
irun += count
print( " Batch of runs done, now irun=",irun)
def SNRstudy(outlabel,MtotList,qList,SNRList,Navg,Nthreads=1):
pp = PdfPages(str(outlabel)+'SNRstudy.pdf')
for q in qList:
tags=[]
labels=[]
count=0
for snr in SNRList:
count+=1
y1=[]
y2=[]
x=[]
for Mtot in MtotList:
print( "Running SNRrun(",Mtot,",",q,",",snr,")")
data=[]
if(multithreaded and Nthreads>1):
threadedSNRrun(Mtot,q,snr,outlabel+"dummy",Navg,Nthreads,data)
else:
for i in range(Navg):
dist=SNRrun(Mtot,q,snr,outlabel+"dummy")
if(not math.isnan(dist)):
data.append(dist)
else:
print( "NAN_DIST!")
zs=np.zeros(0);
for dist in data:
z=z_at_value(cosmo.luminosity_distance,dist*units.Mpc,zmax=100000,ztol=1e-6)
print( "D=",dist," z=",z)
zs=np.append(zs,math.log10(z))
#zs[i]=math.log10(dist)
mean=np.mean(zs);
std=np.std(zs);
print( "M=",Mtot," q=",q,"dist=",mean,"+/-",std)
x.append(math.log10(Mtot/(1+10**mean)))
#x.append(math.log10(Mtot))
y1.append(mean-std)
y2.append(mean+std)
ylabel="log(z)"
ylim=[-1,3]
xlim=[2,9]
if(linearSNRplot):
y1=[10**y for y in y1]
y2=[10**y for y in y2]
ylabel="z"
ylim=[0,20]
xlim=[3.5,8.5]
print( "x=",x)
print( "y1=",y1)
print( "y2=",y2)
color=(0.2,0.8/math.sqrt(q),1.0/math.sqrt(count))
plot=plt.fill_between(x, y1, y2, facecolor=color,alpha=0.3, interpolate=True)
tags.append( Rectangle((0, 0), 1, 1, fc=color,alpha=0.3) )
labels.append("SNR="+str(snr))
print( "Finished band for SNR="+str(snr))
plt.legend(tags,labels)
plt.ylim(ylim)
plt.xlim(xlim)
plt.title("SNR contours for LISA q="+str(q)+" SMBH merger")
plt.ylabel(ylabel)
plt.xlabel("log(M/Msun)")
#plt.show()
pp.savefig()
plt.clf()
print( "Finished plot for q="+str(q))
pp.close()
def getFisherCommand(label,delta=0.1,extrapoints=1.0):
cmd = flare_dir+"/LISAinference/LISAinference_ptmcmc"
    #npts= int(extrapoints*20/delta/delta) #Simplified variant, multiplied by an additional factor of two to be conservative; note that we only seem to have convergence at order (1/nbpts)^0.5
npts = ReIm_npts
flags = "--nsteps=0 --Fisher_err_target="+str(delta)+" --flat-distprior --deltaT 5000"
if(FisherReIm):
flags+=" --tagint 1 --nbptsoverlap "+str(npts)
name=str(label)
flags += " --rng_seed="+str(np.random.rand())+" "
flags += " --outroot "+str(name)+" "
cmd += " "+flags
return cmd;
def FisherRunByParams(snr,params,delta,label,extrapoints=1.0):
    global FisherRunFailCount
    name=str(label) #the rest of this function refers to the run by name
    flags=set_flare_flags(snr,params)
    cmd = getFisherCommand(label,delta)+" "+flags+" >"+str(label)+".out";
    setenv="export ROM_DATA_PATH="+flare_dir+"/"+ROM_DATA_PATH
try:
print( "Executing '"+cmd+"'")
dist=0
cov=[]
if not noRun:
code=subprocess.call(setenv+";"+cmd,shell=True)
print( "Run "+name+" completed with code(",code,")")
with open(name+"params.txt",'r') as file:
lines=file.read()
#print lines
##
print( name+"params.txt")
dist=re.search("dist_resc:(.*)", lines).group(1)
print( "distance =",dist)
time.sleep(1)#pause to make sure file is ready to read.
cov=readCovarFile(name+"_fishcov.dat")
#v=math.sqrt(np.random.rand()-0.1)#to test behavior under occasional failures
except (ValueError,ArithmeticError):
print( "Exception",sys.exc_info()[0]," occurred in run"+name+" for params ",params)
FisherRunFailCount+=1
print( " FailCount=",FisherRunFailCount)
subprocess.call("echo '\n\n***********************\nFailure "+str(FisherRunFailCount)+"\n***********************\n"+cmd+"' |cat - "+name+".out "+name+"_fishcov.out >> fisher_fails.out",shell=True)
return [float(dist)]+cov
def FisherRun(Mtot,q,snr,delta,label,data,extrapoints=1.0):
global FisherRunFailCount
params=draw_params(Mtot,q)
if(not getattr(all_params_file,"write",None)==None):
threadLock.acquire()
all_params_file.write(str(snr)+"\t")
for pval in params:
all_params_file.write(str(pval)+"\t")
all_params_file.write("\n")
threadLock.release()
datum=FisherRunByParams(snr,params,delta,label,extrapoints)
data.append( datum )
return
def threadedFisherRun(Mtot,q,snr,delta,label,Nruns,Nthreads,data,extrapoints):
irun=0
if(FisherRunFailCount==0):subprocess.call("echo 'Output from runs generating exceptions:' > fisher_fails.out",shell=True)
while(irun<Nruns):
        if(Nthreads<Nruns-irun):count=Nthreads
else: count=Nruns-irun
print( "irun=",irun,"Count=",count,"Nruns=",Nruns,"Nthreads=",Nthreads)
threads=[];
ith=0
for t in range(count):
ith+=1
thread = threading.Thread(target=FisherRun, args=(Mtot,q,snr,delta,label+str(ith),data,extrapoints))
thread.start()
threads.append(thread)
for thread in threads:
thread.join() #this blocks further execution until the thread has returned
irun += count
print( " Batch of runs done, now irun=",irun)
def readCovarFile(file):
pars=[]
done=False
trycount=0
while not done:
try:
with open(file,'r') as f:
line="#"
while("#" in line): line=f.readline() #Skip comment
for val in line.split():
#print val
pars.append(float(val))
Npar=len(pars)
while(not "#Covariance" in line):line=f.readline() #Skip until the good stuff
covar=np.zeros((Npar,Npar))
i=0
for par in pars:
line=f.readline()
covar[i]=np.array(line.split())
i+=1
inc = pars[5] #runs from 0 to pi at poles
#lam = pars[6]
beta = pars[7] #runs from -pi/2 to pi/2 at poles
#pol = pars[8]
val=covar[0][0]
if val<0: dm1=float('nan')
else: dm1=math.sqrt(val)
val=covar[1][1]
if val<0: dm2=float('nan')
else: dm2=math.sqrt(val)
dtRef = math.sqrt(covar[2][2])
dD = math.sqrt(covar[3][3])
dphase = math.sqrt(covar[4][4])
dinc = math.sqrt(covar[5][5])
dlam = math.sqrt(covar[6][6])
dbeta = math.sqrt(covar[7][7])
dpol = math.sqrt(covar[8][8])
val=covar[6][6]*covar[7][7]-covar[6][7]**2
if val<0:
dsky=float('nan')
if -val<1e-13*covar[6][6]*covar[7][7]:dsky=0
else: dsky=math.sqrt(val)*math.cos(pars[7])
print( "sky",val,dsky,covar[6][6],covar[7][7])
val=covar[5][5]*covar[8][8]-covar[5][8]**2
if val<0:
dori=float('nan')
if -val<1e-13*covar[5][5]*covar[8][8]:dori=0
else: dori=math.sqrt(val)*math.sin(pars[5])
#HACK? need to verify factor of sin(inc) here!
val=covar[0][0]*covar[1][1]-covar[0][1]**2
if val<0:
dmvol=float('nan')
if -val<1e-13*covar[0][0]*covar[1][1]:dmvol=0
else: dmvol=math.sqrt(val)
done=True
except EnvironmentError:
print( "Something went wrong in trying to open covariance file:",sys.exc_info()[0])
print( "Try=",trycount)
subprocess.call("ls -ort")
trycount+=1;
if(trycount>10):
print( "giving up!!!")
done=True
raise
except ValueError:
print( traceback.format_exc(limit=1))
print( "Continuing after arithmetic error:")
#else: print "...No execption in read covar"
raise
return [dm1,dm2,dtRef,dD,dphase,dinc,dlam,dbeta,dpol,dsky,dori,dmvol]
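# Note on dsky/dori/dmvol above: these are error-"area" statistics built from 2x2
# sub-blocks of the covariance.  For the sky location the area goes as
#   dsky ~ sqrt( Sigma_lam,lam * Sigma_beta,beta - Sigma_lam,beta^2 ) * cos(beta),
# where the cos(beta) factor converts the longitude/latitude spread into a proper
# solid-angle element.  Published sky-localization conventions differ by an overall
# constant factor (e.g. 2*pi), so these values should be compared relatively rather
# than read as absolute solid angles.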
def writeFisherSamples(name,numSamples):
    fishcov=name+"_fishcov.dat"
    outsamp=name+"_fishcov_samples.dat" #write samples to a separate file rather than clobbering the fishcov input
    pars=[]
with open(fishcov,'r') as f:
line="#"
while("#" in line): line=f.readline() #Skip comment
for val in line.split():
#print val
pars.append(float(val))
Npar=len(pars)
while(not "#Covariance" in line):line=f.readline() #Skip until the good stuff
covar=np.zeros((Npar,Npar))
i=0
for par in pars:
line=f.readline()
covar[i]=np.array(line.split())
i+=1
with open(outsamp,'w') as f:
for i in range(numSamples):
f.write("0 0 0 0 0 ")
dpars=np.random.multivariate_normal(pars,covar)
for val in pars+dpars:
f.write(" "+str(val))
f.write("\n")
def FisherStudy(outlabel,MtotList,qList,SNRList,deltalist,Navg,Nthreads,extrapoints=1.0):
pp = PdfPages(str(outlabel)+'FisherStudy.pdf')
datafile = open(outlabel+'FisherStudy.dat','w')
for q in qList:
tags=[]
labels=[]
snrcount=0
for snr in SNRList:
snrcount+=1
deltacount=0;
for delta in deltalist:
deltacount+=1
y1=[]
y2=[]
x=[]
for Mtot in MtotList:
data=[]
logzs=np.zeros(Navg);
print( "Running FisherRun(",Mtot,",",q,",",snr,")")
if(multithreaded):
threadedFisherRun(Mtot,q,snr,delta,outlabel+"dummy",Navg,Nthreads,data,extrapoints)
else:
for i in range(Navg):
FisherRun(Mtot,q,snr,delta,outlabel+"dummy",data,extrapoints)
if noRun: continue
for i in range(Navg):
                        distance=data[i][0]*units.Mpc
print( "distance=",distance)
z=z_at_value(cosmo.luminosity_distance,data[i][0]*units.Mpc,zmax=100000,ztol=1e-6)
#print "D=",dist," z=",z
logzs[i]=math.log10(z)
meanz=np.mean(logzs);
stdz=np.std(logzs);
print( "M=",Mtot," q=",q,"z=",meanz,"+/-",stdz)
datafile.write(str(snr)+"\t"+str(delta)+"\t"+str(Mtot)+"\t"+str(q)+"\t"+str(meanz)+"\t"+str(stdz))
npdata=np.array(data)
print( "data:\n",data)
print( "npdata:\n",npdata)
Nstats=len(npdata[0,:])-1
print( "Nstats=",Nstats)
#i=0
#for d in [npdata[j,:] for j in range(Navg) ]:
# print "d:\n",d
# print "len[",i,"]=",d.size
# i+=1
# if not d.size==Nstats+1:
# print "wrong length for:\n ",d
means=[]
stds=[]
for i in range(Nstats):
print( "i=",i)
sel=npdata[:,i+1]
v=np.log10(np.array(sel))
mean=np.mean(v)
std=np.std(v)
means.append(mean)
stds.append(std)
print(" ",mean," +/- ",std)
datafile.write("\t"+str(mean)+"\t"+str(std))
datafile.write("\n")
datafile.flush()
x.append(math.log10(Mtot/(1+10**meanz)))
#x.append(math.log10(Mtot))
#y1.append(meanz-stdz)
#y2.append(meanz+stdz)
y1.append((means[ireport]-stds[ireport]))
y2.append((means[ireport]+stds[ireport]))
print( "x=",x)
print( "y1=",y1)
print( "y2=",y2)
color=(0.2,0.8/math.sqrt(q),1.0/math.sqrt(snrcount))
if(deltacount==1):
plot=plt.fill_between(x, y1, y2, facecolor=color,alpha=0.3, interpolate=True)
else:
plot=plt.plot(x,y1,color=color,alpha=1.0)
plot=plt.plot(x,y2,color=color,alpha=1.0)
tags.append( Rectangle((0, 0), 1, 1, fc=color,alpha=0.3) )
labels.append("SNR="+str(snr))
plt.legend(tags,labels)
plt.ylim([-1,7])
plt.xlim([3,9])
plt.title("Parameter uncertainty for LISA q="+str(q)+" SMBH merger")
plt.ylabel("log(d"+par_name(ireport)+")")
plt.xlabel("log(M/Msun)")
#plt.show()
pp.savefig()
plt.clf()
pp.close()
def FisherPlot(outlabel,ipar,qList,SNRList,deltalist,datafile,scaled=False,targetSNR=None,errorNsigma=2):
pp = PdfPages(str(outlabel)+'Fisher-'+par_name(ipar)+'.pdf')
#datafile = open(datafile,'r')
tol=1e-10
data=np.loadtxt(datafile)
punits=["Msun","Msun","s","Mpc","rad","rad","rad","rad","rad",r"$rad^2$",r"$rad^2$",r"$Msun^2$"]
sunits=["m1", "m2", "s","D", "rad","rad","rad","rad","rad",r"$rad^2$",r"$rad^2$",r"(m1*m2)"]
if(scaled):punits=sunits
for q in qList:
tags=[]
labels=[]
snrcount=0
for snr in SNRList:
if(targetSNR==None):targetSNR=snr
snrcount+=1
deltacount=0;
for delta in deltalist:
deltacount+=1
subdata=[]
for d in data:
if abs(d[0]/snr-1)<tol and abs(d[1]/delta-1)<tol and abs(d[3]/q-1)<tol:
subdata.append(d)
subdata=np.array(subdata)
print( "subdata=",subdata)
iMtot=2
imeanz=4
istdz=5
imeanpar=6+ipar*2
istdpar=7+ipar*2
#x.append(math.log10(Mtot/(1+10**meanz)))
#y1.append((means[ireport]-stds[ireport]))
#y2.append((means[ireport]+stds[ireport]))
#compute scalings, if needed
scales=subdata[:,iMtot]*0 #initially set all (logs of) scales to 0.0
if(scaled):#note all param error data are in log-space here
if(ipar==0): #scale by m1
scales=np.log10(subdata[:,iMtot]/(1+1/q))
if(ipar==1): #scale by m2
scales=np.log10(subdata[:,iMtot]/(1+q))
if(ipar==3): #scale by distance as computed from mean z
scales=np.array([math.log10(cosmo.luminosity_distance(10.0**zz).value) for zz in subdata[:,imeanz]])
if(ipar==11): #scale by m1*m2
scales=np.log10(subdata[:,iMtot]*subdata[:,iMtot]/(1+q)/(1+1/q))
meanzarray=subdata[:,imeanz]
SNRrescale_factor=1.0
if(not targetSNR==snr):
SNRrescale_factor=targetSNR/snr
#next make a new array of redshifts znew=z(D(z)/SNRrescale_factor)
meanzarray=np.array([math.log10(z_at_value(cosmo.luminosity_distance,cosmo.luminosity_distance(10**zz/SNRrescale_factor),zmax=100000,ztol=1e-6)) for zz in meanzarray])
print( "Rescaling SNR by ",SNRrescale_factor," from ",snr," to ", targetSNR)
x=[ math.log10(a/(1+10**b)) for a,b in zip(subdata[:,iMtot],meanzarray) ]
#print "x=",x
#print "imeanpar=",imeanpar
#print "y0=",subdata[:,imeanpar]
#print "dy=",subdata[:,istdpar]
y1=subdata[:,imeanpar]-subdata[:,istdpar]-scales + math.log10(1.0*errorNsigma/SNRrescale_factor)
#print "y1=",y1
y2=subdata[:,imeanpar]+subdata[:,istdpar]-scales + math.log10(1.0*errorNsigma/SNRrescale_factor)
#print "y2=",y2
color=(0.2,0.8/math.sqrt(q),1.0/math.sqrt(snrcount))
if(deltacount==1):
plot=plt.fill_between(x, y1, y2, facecolor=color,alpha=0.3, interpolate=True)
else:
plot=plt.plot(x,y1,color=color,alpha=1.0)
plot=plt.plot(x,y2,color=color,alpha=1.0)
#plt.plot(x,scales)
tags.append( Rectangle((0, 0), 1, 1, fc=color,alpha=0.3) )
labels.append("SNR="+str(targetSNR))
plt.legend(tags,labels)
#plt.ylim([-1,7])
plt.xlim([3,9])
plt.title("Parameter uncertainty for LISA q="+str(q)+" SMBH merger")
plt.ylabel("log("+str(errorNsigma)+r"$\sigma$"+"["+par_name(ipar)+"]/"+punits[ipar]+")")
plt.xlabel("log(M/Msun)")
#plt.show()
pp.savefig()
plt.clf()
pp.close()
def HorizonPlot(outlabel,ipar,qList,snr,delta,datafile,horizonlist,scaled=False,errorNsigma=2,show_range=False):
rangetag=''
if(show_range):rangetag='range-'
name=str(outlabel)+'Horizon-'+rangetag+par_name(ipar)+'.pdf'
print( "Making plot: "+name)
pp = PdfPages(name);
#datafile = open(datafile,'r')
tol=1e-10
data=np.loadtxt(datafile)
punits=["Msun","Msun","s","Mpc","rad","rad","rad","rad","rad",r"$rad^2$",r"$rad^2$",r"$Msun^2$"]
sunits=["m1", "m2", "s","D", "rad","rad","rad","rad","rad",r"$deg^2$",r"$rad^2$",r"(m1*m2)"]
if(scaled):punits=sunits
for q in qList:
tags=[]
labels=[]
subdata=[]
print( "finding data with SNR="+str(snr)+", delta="+str(delta)+", q="+str(q))
for d in data:
if abs(d[0]/snr-1)<tol and abs(d[1]/delta-1)<tol and abs(d[3]/q-1)<tol:
subdata.append(d)
subdata=np.array(subdata)
print( "subdata=",subdata)
iMtot=2
imeanz=4
istdz=5
imeanpar=6+ipar*2
istdpar=7+ipar*2
#compute scalings, if needed
scales=subdata[:,iMtot]*0 #initially set all (logs of) scales to 0.0
if(scaled):#note all param error data are in log-space here
if(ipar==0): #scale by m1
scales=np.log10(subdata[:,iMtot]/(1+1/q))
if(ipar==1): #scale by m2
scales=np.log10(subdata[:,iMtot]/(1+q))
if(ipar==3): #scale by distance as computed from mean z
scales=np.array([math.log10(cosmo.luminosity_distance(10.0**zz).value) for zz in subdata[:,imeanz]])
            if(ipar==9): #scale from rad^2 to deg^2
scales=np.full_like(subdata[:,iMtot], 2*math.log10(math.pi/180.0))
if(ipar==11): #scale by m1*m2
scales=np.log10(subdata[:,iMtot]*subdata[:,iMtot]/(1+q)/(1+1/q))
print( scales)
colorcount=0
for horizoncut in horizonlist:
colorcount+=1
meanzarray=subdata[:,imeanz]
stdpararray=subdata[:,istdpar]
testvalues=10**(subdata[:,imeanpar]-scales + math.log10(1.0*errorNsigma))
SNRrescale_factors=testvalues/horizoncut
dSNRrescale_factors=10**stdpararray;
if(ipar>=9):#derived quadratic scaled stats
SNRrescale_factors=np.sqrt(SNRrescale_factors)
dSNRrescale_factors=np.sqrt(dSNRrescale_factors)
midzarray=np.array([math.log10(z_at_value(cosmo.luminosity_distance,cosmo.luminosity_distance(10**zz/fac),zmax=100000,ztol=1e-6)) for zz,fac in zip(meanzarray,SNRrescale_factors)])
if(show_range):
topzarray=np.array([math.log10(z_at_value(cosmo.luminosity_distance,cosmo.luminosity_distance(10**zz/fac),zmax=100000,ztol=1e-6)) for zz,fac in zip(meanzarray,SNRrescale_factors/dSNRrescale_factors)])
botzarray=np.array([math.log10(z_at_value(cosmo.luminosity_distance,cosmo.luminosity_distance(10**zz/fac),zmax=100000,ztol=1e-6)) for zz,fac in zip(meanzarray,SNRrescale_factors*dSNRrescale_factors)])
x=[ math.log10(a/(1+10**b)) for a,b in zip(subdata[:,iMtot],midzarray) ]
color=(1.0-colorcount/(len(horizonlist)+1.0),0.8/math.sqrt(q),colorcount/(len(horizonlist)+1.0))
if(show_range):
plot=plt.fill_between(x, botzarray, topzarray, facecolor=color,alpha=0.3, interpolate=True)
color=np.array(color)*0.8
plot=plt.plot(x,midzarray,color=color,alpha=1.0)
tags.append( Rectangle((0, 0), 1, 1, fc=color,alpha=0.3) )
labels.append(str(horizoncut)+"="+str(errorNsigma)+r"$\sigma$"+"["+par_name(ipar)+"]/"+punits[ipar])
plt.legend(tags,labels)
plt.ylim([-1,3])
plt.xlim([3,9])
plt.title("Science range for "+outlabel+" q="+str(q)+" SMBH merger")
plt.ylabel("log(z)")
plt.xlabel("log(M/Msun)")
#plt.show()
pp.savefig()
plt.clf()
pp.close()
def HorizonCompare(outlabel,ipar,qList,snr,delta,datafiles,horizonlist,scaled=False,errorNsigma=2):
rangetag=''
pp = PdfPages(str(outlabel)+'Horizon-'+rangetag+par_name(ipar)+'.pdf')
#datafile = open(datafile,'r')
tol=1e-10
datalist=[]
for datafile in datafiles:
datalist.append(np.loadtxt(datafile))
punits=["Msun","Msun","s","Mpc","rad","rad","rad","rad","rad",r"$rad^2$",r"$rad^2$",r"$Msun^2$"]
sunits=["m1", "m2", "s","D", "rad","rad","rad","rad","rad",r"$deg^2$",r"$rad^2$",r"(m1*m2)"]
if(scaled):punits=sunits
for q in qList:
tags=[]
labels=[]
subdatalist=[]
for data in datalist:
subdata=[]
for d in data:
if abs(d[0]/snr-1)<tol and abs(d[1]/delta-1)<tol and abs(d[3]/q-1)<tol:
subdata.append(d)
subdatalist.append(subdata)
        subdatalist=np.array(subdatalist)
        print( "subdatalist=",subdatalist)
iMtot=2
imeanz=4
istdz=5
imeanpar=6+ipar*2
istdpar=7+ipar*2
#compute scalings, if needed
#scales=subdata[:,iMtot]*0 #initially set all (logs of) scales to 0.0
scaleslist=subdatalist[:,:,iMtot]*0 #initially set all (logs of) scales to 0.0
if(scaled):#note all param error data are in log-space here
if(ipar==0): #scale by m1
scaleslist=np.log10(subdatalist[:,:,iMtot]/(1+1/q))
if(ipar==1): #scale by m2
scaleslist=np.log10(subdatalist[:,:,iMtot]/(1+q))
if(ipar==3): #scale by distance as computed from mean z
scaleslist=np.array([[math.log10(cosmo.luminosity_distance(10.0**zz).value) for zz in subdata[:,imeanz]] for subdata in subdatalist])
            if(ipar==9): #scale from rad^2 to deg^2
scaleslist=np.full_like(subdatalist[:,:,iMtot], 2*math.log10(math.pi/180.0))
if(ipar==11): #scale by m1*m2
                scaleslist=np.log10(subdatalist[:,:,iMtot]*subdatalist[:,:,iMtot]/(1+q)/(1+1/q))
        print( scaleslist)
colorcount=0
for horizoncut in horizonlist:
colorcount+=1
meanzarraylist=subdatalist[:,:,imeanz]
stdpararraylist=subdatalist[:,:,istdpar]
            testvalueslist=10**(subdatalist[:,:,imeanpar]-scaleslist + math.log10(1.0*errorNsigma))
SNRrescale_factorslist=testvalueslist/horizoncut
dSNRrescale_factorslist=10**stdpararraylist;
if(ipar>=9):#derived quadratic scaled stats
SNRrescale_factorslist=np.sqrt(SNRrescale_factorslist)
dSNRrescale_factorslist=np.sqrt(dSNRrescale_factorslist)
#midzarray=np.array([math.log10(z_at_value(cosmo.luminosity_distance,cosmo.luminosity_distance(10**zz/fac),zmax=100000,ztol=1e-6)) for zz,fac in zip(meanzarray,SNRrescale_factors)])
            midzarraylist=np.array([[math.log10(z_at_value(cosmo.luminosity_distance,cosmo.luminosity_distance(10**zz/fac),zmax=100000,ztol=1e-6)) for zz,fac in zip(zzs,facs) ] for zzs,facs in zip(meanzarraylist,SNRrescale_factorslist)])
            x=[ [ math.log10(a/(1+10**b)) for a,b in zip(ms,zs) ] for ms,zs in zip(subdatalist[:,:,iMtot],midzarraylist) ]
color=(1.0-colorcount/(len(horizonlist)+1.0),0.8/math.sqrt(q),colorcount/(len(horizonlist)+1.0))
            for ifile in range(len(datafiles)):
                plt.plot(x[ifile],midzarraylist[ifile],color=color,alpha=1.0)
                tags.append( Rectangle((0, 0), 1, 1, fc=color,alpha=0.3) )
                labels.append(str(horizoncut)+"="+str(errorNsigma)+r"$\sigma$"+"["+par_name(ipar)+"]/"+punits[ipar]+" ("+datafiles[ifile]+")" )
plt.legend(tags,labels)
plt.ylim([-1,3])
plt.xlim([3,9])
plt.title("Science range for "+outlabel+" q="+str(q)+" SMBH merger")
plt.ylabel("log(z)")
plt.xlabel("log(M/Msun)")
#plt.show()
pp.savefig()
plt.clf()
pp.close()
| apache-2.0 |
iancze/PSOAP | tests/test_orbit_astrometry_GJ3305AB.py | 1 | 10006 | import pytest
import numpy as np
from psoap import orbit_astrometry
from psoap import constants as C
import matplotlib.pyplot as plt
from astropy.time import Time
import matplotlib
import os
import pkg_resources
# Create plots of all of the orbits
from astropy.io import ascii
outdir = "tests/plots/GJ3305AB/"
if not os.path.exists(outdir):
print("Creating ", outdir)
os.makedirs(outdir)
# Load the Montet RV dataset
data_fname = pkg_resources.resource_filename("psoap", "data/GJ3305AB/rv.txt")
data = ascii.read(data_fname, format="csv")
# convert UT date to JD
rv_jds_A = Time(data["date"], format="decimalyear")
rv_jds_A.format = 'jd'
rv_jds_A = rv_jds_A.value
vAs_data = data["RV"]
vAs_err = data["RV_err"]
# Load the Montet astrometry dataset
astro_fname = pkg_resources.resource_filename("psoap", "data/GJ3305AB/astro.txt")
astro_data = ascii.read(astro_fname, format="csv")
rho_data = astro_data["rho"]
rho_err = astro_data["rho_err"]
theta_data = astro_data["PA"]
theta_err = astro_data["PA_err"]
astro_jds = Time(astro_data["date"], format="decimalyear")
astro_jds.format = 'jd'
astro_jds = astro_jds.value
def test_data():
# Make a plot of the astrometric data on the sky
fig, ax = plt.subplots(nrows=1)
xs = rho_data * np.cos(theta_data * np.pi/180)
ys = rho_data * np.sin(theta_data * np.pi/180)
ax.plot(xs, ys, ".")
ax.set_xlabel("North")
ax.set_ylabel("East")
ax.plot(0,0, "k*")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "data_astro.png")
plt.close('all')
dpc = 29.43 # pc
# Orbital elements for GJ 3305 AB
a = 9.78 # [AU]
e = 0.19
# e = 0.3
i = 92.1 # [deg]
# omega = -69 # omega_1
# omega = 0
omega = -69
# omega_2 = omega_2 + 180
# omega_2 = omega + 180
# omega = omega_2 - 180# [deg] # we actua
Omega = 18.8 + 180 # [deg]
# Omega = 20 # [deg]
T0 = Time(2007.14, format="decimalyear")
T0.format = "jd"
T0 = T0.value # [Julian Date]
M_2 = 0.44 # [M_sun]
M_tot = 0.67 + M_2 # [M_sun]
gamma = 20.76 # [km/s]
P = np.sqrt(4 * np.pi**2 / (C.G * M_tot * C.M_sun) * (a * C.AU)**3) / (24 * 3600) # [day]
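# This is Kepler's third law, P^2 = 4 pi^2 a^3 / (G M_tot), evaluated in SI units and
# converted from seconds to days.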
# Pick a span of dates for one period
dates = np.linspace(T0, T0 + P, num=600)
# Initialize the orbit
orb = orbit_astrometry.Binary(a, e, i, omega, Omega, T0, M_tot, M_2, gamma, obs_dates=dates)
full_dict = orb.get_full_orbit()
vAs, vBs, XYZ_As, XYZ_Bs, XYZ_ABs, xy_As, xy_Bs, xy_ABs = [full_dict[key] for key in ("vAs", "vBs", "XYZ_As", "XYZ_Bs", "XYZ_ABs", "xy_As", "xy_Bs", "xy_ABs")]
polar_dict = orb.get_orbit()
vAs, vBs, rho_ABs, theta_ABs = [polar_dict[key] for key in ("vAs", "vBs", "rhos", "thetas")]
# Convert to sky coordinates, using distance
alpha_dec_As = XYZ_As/dpc # [arcsec]
alpha_dec_Bs = XYZ_Bs/dpc # [arcsec]
alpha_dec_ABs = XYZ_ABs/dpc # [arcsec]
rho_ABs = rho_ABs/dpc # [arcsec]
peri_A = orb._get_periastron_A()/dpc
peri_B = orb._get_periastron_B()/dpc
peri_BA = orb._get_periastron_BA()/dpc
asc_A = orb._get_node_A()/dpc
asc_B = orb._get_node_B()/dpc
asc_BA = orb._get_node_BA()/dpc
# Since we are plotting vs one date, we need to plot the dots using a color scale so we can figure them out along the orbit.
# Set a colorscale for the lnprobs
cmap_primary = matplotlib.cm.get_cmap("Blues")
cmap_secondary = matplotlib.cm.get_cmap("Oranges")
norm = matplotlib.colors.Normalize(vmin=np.min(dates), vmax=np.max(dates))
# Determine colors based on the ending lnprob of each walker
def plot_points(ax, dates, xs, ys, primary):
for date, x, y in zip(dates, xs, ys):
if primary:
c = cmap_primary(norm(date))
else:
c = cmap_secondary(norm(date))
ax.plot(x, y, "o", color=c, mew=0.1, ms=3, mec="k")
# Then, we will make 3D plots of the orbit so that we can square with what we think is happening.
# The final crowning grace will be a 3D matplotlib plot of the orbital path.
def test_B_rel_A():
# Plot the Orbits
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
plot_points(ax, dates, alpha_dec_ABs[:,0], alpha_dec_ABs[:,1], False)
ax.plot(0,0, "*k", ms=2)
ax.plot(peri_BA[0], peri_BA[1], "ko", ms=3)
ax.plot(asc_BA[0], asc_BA[1], "o", color="C2", ms=3)
ax.set_xlabel(r"$\Delta \delta$ mas")
ax.set_ylabel(r"$\Delta \alpha \cos \delta$ mas")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_B_rel_A.png")
# Make a series of astrometric plots from different angles.
def test_AB_Z():
# Now plot A and B together, viewed from the Z axis
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
ax.plot(0,0, "ok", ms=2)
plot_points(ax, dates, alpha_dec_As[:,0], alpha_dec_As[:,1], True)
plot_points(ax, dates, alpha_dec_Bs[:,0], alpha_dec_Bs[:,1], False)
ax.plot(peri_A[0], peri_A[1], "ko", ms=3)
ax.plot(peri_B[0], peri_B[1], "ko", ms=3)
ax.plot(asc_A[0], asc_A[1], "^", color="C0", ms=3)
ax.plot(asc_B[0], asc_B[1], "^", color="C1", ms=3)
ax.set_xlabel(r"$\Delta \delta$ mas")
ax.set_ylabel(r"$\Delta \alpha$ mas")
ax.set_aspect("equal", "datalim")
fig.subplots_adjust(left=0.15, right=0.85, bottom=0.15, top=0.85)
fig.savefig(outdir + "orbit_AB_Z.png")
def test_AB_X():
# Now plot A and B together, viewed from the X axis
# This means Y will form the "X" axis, or North
# And Z will form the Y axis, or towards observer
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
ax.plot(0,0, "ok", ms=2)
plot_points(ax, dates, alpha_dec_As[:,1], alpha_dec_As[:,2], True)
plot_points(ax, dates, alpha_dec_Bs[:,1], alpha_dec_Bs[:,2], False)
ax.plot(peri_A[1], peri_A[2], "ko", ms=3)
ax.plot(peri_B[1], peri_B[2], "ko", ms=3)
ax.plot(asc_A[1], asc_A[2], "^", color="C0", ms=3)
ax.plot(asc_B[1], asc_B[2], "^", color="C1", ms=3)
ax.set_xlabel(r"$\Delta \alpha$ mas")
ax.set_ylabel(r"$\Delta Z$ mas (towards observer)")
ax.axhline(0, ls=":", color="k")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_AB_X.png")
def test_AB_Y():
# Now plot A and B together, viewed from the Y axis
# This means Z will form the "X" axis, or towards the observer
# And X will form the Y axis, or East
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
ax.plot(0,0, "ok", ms=2)
plot_points(ax, dates, alpha_dec_As[:,2], alpha_dec_As[:,0], True)
plot_points(ax, dates, alpha_dec_Bs[:,2], alpha_dec_Bs[:,0], False)
ax.plot(peri_A[2], peri_A[0], "ko", ms=3)
ax.plot(peri_B[2], peri_B[0], "ko", ms=3)
ax.plot(asc_A[2], asc_A[0], "^", color="C0", ms=3)
ax.plot(asc_B[2], asc_B[0], "^", color="C1", ms=3)
ax.axvline(0, ls=":", color="k")
ax.set_xlabel(r"$\Delta Z$ mas (towards observer)")
ax.set_ylabel(r"$\Delta \delta$ mas")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_AB_Y.png")
def test_vel_rho_theta_one_p():
# Plot velocities, rho, and theta as function of time for one period
fig, ax = plt.subplots(nrows=4, sharex=True, figsize=(8,8))
ax[0].plot(dates, vAs)
# ax[0].errorbar(rv_jds_A, vAs_data, yerr=vAs_err, ls="")
# ax[0].plot(rv_jds_A, vAs_data, "k.")
ax[0].set_ylabel(r"$v_A$ km/s")
ax[1].plot(dates, vBs)
# ax[1].errorbar(rv_jds_B, vBs_data, yerr=vBs_err, ls="")
# ax[1].plot(rv_jds_B, vBs_data, "k.")
ax[1].set_ylabel(r"$v_B$ km/s")
ax[2].plot(dates, rho_ABs)
# ax[2].errorbar(astro_jds, rho_data, yerr=rho_err, ls="")
# ax[2].plot(astro_jds, rho_data, "k.")
ax[2].set_ylabel(r"$\rho_\mathrm{AB}$ [mas]")
ax[3].plot(dates, theta_ABs)
# ax[3].errorbar(astro_jds, theta_data, yerr=theta_err, ls="")
# ax[3].plot(astro_jds, theta_data, "k.")
ax[3].set_ylabel(r"$\theta$ [deg]")
ax[-1].set_xlabel("date")
fig.savefig(outdir + "orbit_vel_rho_theta_one_period.png", dpi=400)
# Now make a 3D Orbit and pop it up
def test_B_rel_A_plane():
# Plot the orbits in the plane
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
plot_points(ax, dates, xy_ABs[:,0], xy_ABs[:,1], False)
ax.plot(0,0, "*k", ms=10)
ax.set_xlabel(r"$X$ [AU]")
ax.set_ylabel(r"$Y$ [AU]")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_B_rel_A_plane.png")
def test_AB_plane():
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
plot_points(ax, dates, xy_As[:,0], xy_As[:,1], True)
plot_points(ax, dates, xy_Bs[:,0], xy_Bs[:,1], False)
ax.plot(0,0, "ko", ms=10)
ax.set_xlabel(r"$X$ [AU]")
ax.set_ylabel(r"$Y$ [AU]")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_AB_plane.png")
# Redo this using a finer space series of dates spanning the full series of observations.
# Pick a span of dates for the observations
dates = np.linspace(2452240, 2457265, num=3000) # [day]
orb = orbit_astrometry.Binary(a, e, i, omega, Omega, T0, M_tot, M_2, gamma, obs_dates=dates)
polar_dict = orb.get_orbit()
vAs, vBs, rho_ABs, theta_ABs = [polar_dict[key] for key in ("vAs", "vBs", "rhos", "thetas")]
# Convert to sky coordinates, using distance
rho_ABs = rho_ABs/dpc # [arcsec]
def test_vel_rho_theta():
# Plot velocities, rho, and theta as function of time
fig, ax = plt.subplots(nrows=4, sharex=True, figsize=(12,8))
ax[0].plot(dates, vAs)
ax[0].errorbar(rv_jds_A, vAs_data, yerr=vAs_err, ls="")
ax[0].plot(rv_jds_A, vAs_data, "k.")
ax[0].set_ylabel(r"$v_A$ km/s")
ax[1].plot(dates, vBs)
# ax[1].errorbar(rv_jds_B, vBs_data, yerr=vBs_err, ls="")
# ax[1].plot(rv_jds_B, vBs_data, "k.")
ax[1].set_ylabel(r"$v_B$ km/s")
ax[2].plot(dates, rho_ABs)
ax[2].errorbar(astro_jds, 1e-3*rho_data, yerr=1e-3*rho_err, ls="")
ax[2].plot(astro_jds, 1e-3*rho_data, "k.")
ax[2].set_ylabel(r"$\rho_\mathrm{AB}$ [arcsec]")
ax[3].plot(dates, theta_ABs)
ax[3].errorbar(astro_jds, theta_data, yerr=theta_err, ls="")
ax[3].plot(astro_jds, theta_data, "k.")
ax[3].set_ylabel(r"$\theta$ [deg]")
ax[-1].set_xlabel("date")
fig.savefig(outdir + "orbit_vel_rho_theta.png", dpi=400)
plt.close('all')
plt.close('all')
| mit |
voxlol/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
bhargavasana/activitysim | activitysim/skim.py | 1 | 14509 | # ActivitySim
# Copyright (C) 2014-2015 Synthicity, LLC
# See full license in LICENSE.txt.
import numpy as np
import pandas as pd
class Skim(object):
"""
Container for skim arrays.
Parameters
----------
data : 2D array
offset : int, optional
An optional offset that will be added to origin/destination
values to turn them into array indices.
For example, if zone IDs are 1-based, an offset of -1
would turn them into 0-based array indices.
"""
def __init__(self, data, offset=None):
self.data = np.asanyarray(data)
self.offset = offset
def get(self, orig, dest):
"""
        Get impedance values for a set of origin, destination pairs.
Parameters
----------
orig : 1D array
dest : 1D array
Returns
-------
values : 1D array
"""
# only working with numpy in here
orig = np.asanyarray(orig)
dest = np.asanyarray(dest)
out_shape = orig.shape
# filter orig and dest to only the real-number pairs
notnan = ~(np.isnan(orig) | np.isnan(dest))
orig = orig[notnan].astype('int')
dest = dest[notnan].astype('int')
if self.offset:
orig = orig + self.offset
dest = dest + self.offset
result = self.data[orig, dest]
# add the nans back to the result
out = np.empty(out_shape)
out[notnan] = result
out[~notnan] = np.nan
return out
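# Minimal usage sketch (illustrative only; the zone numbers and impedances below are
# made up).  With 1-based zone IDs, offset=-1 maps zone k to array index k-1.
def _example_skim_usage():
    dist = Skim(np.arange(9, dtype='float64').reshape(3, 3), offset=-1)
    # zones (1, 3) -> indices (0, 2) -> data[0, 2]; zones (2, 1) -> data[1, 0]
    return dist.get(np.array([1., 2.]), np.array([3., 1.]))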
class Skims(object):
"""
A skims object is a wrapper around multiple skim objects,
where each object is identified by a key. It operates like a
dictionary - i.e. use brackets to add and get skim objects - but also
has information on how to lookup against the skim objects.
Specifically, this object has a dataframe, a left_key and right_key.
It is assumed that left_key and right_key identify columns in df. The
parameter df is usually set by the simulation itself as it's a result of
interacting choosers and alternatives.
When the user calls skims[key], key is an identifier for which skim
to use, and the object automatically looks up impedances of that skim
using the specified left_key column in df as the origin and
the right_key column in df as the destination. In this way, the user
does not do the O-D lookup by hand and only specifies which skim to use
for this lookup. This is the only purpose of this object: to
abstract away the O-D lookup and use skims by specifying which skim
to use in the expressions.
Note that keys are any hashable object, not just strings. So calling
skim[('AM', 'SOV')] is valid and useful.
"""
def __init__(self):
self.skims = {}
self.left_key = "TAZ"
self.right_key = "TAZ_r"
self.df = None
def set_keys(self, left_key, right_key):
"""
Set the left and right keys.
Parameters
----------
left_key : String
The left key (origin) column in the dataframe
right_key : String
The right key (destination) column in the dataframe
Returns
--------
Nothing
"""
self.left_key = left_key
self.right_key = right_key
return self
def set_df(self, df):
"""
Set the dataframe
Parameters
----------
df : DataFrame
The dataframe which contains the origin and destination ids
Returns
-------
Nothing
"""
self.df = df
def lookup(self, skim):
"""
Generally not called by the user - use __getitem__ instead
Parameters
----------
skim: Skim
The skim object to perform the lookup using df[left_key] as the
origin and df[right_key] as the destination
Returns
-------
impedances: pd.Series
A Series of impedances which are elements of the Skim object and
with the same index as df
"""
assert self.df is not None, "Call set_df first"
s = skim.get(self.df[self.left_key],
self.df[self.right_key])
return pd.Series(s, index=self.df.index)
def set_3d(self, key, key_3d, value):
"""
If you want to use the Skims3D object below, you will need to do that
explicitly by setting first the key which will be used by __getattr__
and second the key that relates to the 3rd dimension of the dataframe.
Parameters
----------
key : String or any hashable
Will be accessible using __getitem__ in Skims3d
key_3d : String or any hashable
Relates to the 3rd dimension lookup column set by Skims3D
value : Skim
the skim object for these keys
"""
self.skims[(key, key_3d)] = value
def get_3d(self, key, key_3d):
"""
        Get a skim object previously stored with set_3d for this pair of keys.
Parameters
----------
key : String or any hashable
Will be accessible using __getitem__ in Skims3d
key_3d : String or any hashable
Relates to the 3rd dimension lookup column set by Skims3D
Returns
-------
skims : Skim
the skim object for these keys
"""
return self.skims[(key, key_3d)]
def __setitem__(self, key, value):
"""
Set an available skim object
Parameters
----------
key : hashable
The key (identifier) for this skim object
value : Skim
The skim object
Returns
-------
Nothing
"""
self.skims[key] = value
def __getitem__(self, key):
"""
Get an available skim object
Parameters
----------
key : hashable
The key (identifier) for this skim object
Returns
-------
skim: Skim
The skim object
"""
return self.lookup(self.skims[key])
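# Minimal usage sketch (illustrative only; the DataFrame and zone IDs are made up).
# A Skims container holds Skim objects by key and looks them up against the
# origin/destination columns of whatever DataFrame is currently set.
def _example_skims_usage():
    skims = Skims()                       # default keys are "TAZ" / "TAZ_r"
    skims['DIST'] = Skim(np.arange(9, dtype='float64').reshape(3, 3), offset=-1)
    df = pd.DataFrame({"TAZ": [1, 2, 3], "TAZ_r": [3, 2, 1]})
    skims.set_df(df)
    return skims['DIST']                  # pd.Series of impedances, indexed like df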
class Skims3D(object):
"""
    A Skims3D object wraps a Skims object to add an additional wrinkle of
lookup functionality. Upon init the separate skims objects are
processed into a 3D matrix so that lookup of the different skims can
be performed quickly for each row in the dataframe. In this very
particular formulation, the keys are assumed to be tuples with two
elements - the second element of which will be taken from the
different rows in the dataframe. The first element can then be
dereferenced like an array. This is useful, for instance, to have a
certain skim vary by time of day - the skims are set with keys of
    ('SOV', 'AM'), ('SOV', 'PM') etc. The time of day is then taken to
be different for every row in the tours table, and the 'SOV' portion
of the key can be used in __getitem__.
    To be more explicit, the input is a dictionary of Skim objects, each of
which contains a 2D matrix. These are stacked into a 3D matrix with a
mapping of keys to indexes which is applied using pandas .map to a third
column in the object dataframe. The three columns - left_key and
right_key from the Skims object and skim_key from this one, are then used to
dereference the 3D matrix. The tricky part comes in defining the key which
matches the 3rd dimension of the matrix, and the key which is passed into
__getitem__ below (i.e. the one used in the specs). By convention,
every key in the Skims object that is passed in MUST be a tuple with 2
items. The second item in the tuple maps to the items in the dataframe
referred to by the skim_key column and the first item in the tuple is
then available to pass directly to __getitem__. This is now made
explicit by adding the set_3d and get_3d methods in the Skims object which
take the two keys independently and convert to the tuple internally.
    The net result of this is that in the specs, you can say something
like out_skim['SOV'] and it will automatically dereference the 3D matrix
using origin, destination, and time of day.
Parameters
----------
skims: Skims
This is the Skims object to wrap
skim_key : str
This identifies the column in the dataframe which is used to
select among Skim object using the SECOND item in each tuple (see
above for a more complete description)
offset : int, optional
A single offset must be used for all Skim objects - previous
offsets will be ignored
"""
def __init__(self, skims, skim_key, offset=None):
self.left_key = skims.left_key
self.right_key = skims.right_key
self.offset = offset
self.skim_key = skim_key
self.df = skims.df
self.omx = None
self.skims_data = {}
self.skim_keys_to_indexes = {}
# pass to make dictionary of dictionaries where highest level is unique
# first items of the tuples and the 2nd level is the second items of
# the tuples
for key, value in skims.skims.iteritems():
if not isinstance(key, tuple) or not len(key) == 2:
print "WARNING, skipping key: ", key
continue
skim_key1, skim_key2 = key
self.skims_data.setdefault(skim_key1, {})[skim_key2] = value.data
# second pass to turn the each highest level value into a 3D array
# with a dictionary to make second level keys to indexes
for skim_key1, value in self.skims_data.iteritems():
self.skims_data[skim_key1] = np.dstack(value.values())
self.skim_keys_to_indexes[skim_key1] = \
dict(zip(value.keys(), range(len(value))))
def set_df(self, df):
"""
Set the dataframe
Parameters
----------
df : DataFrame
The dataframe which contains the origin and destination ids
Returns
-------
Nothing
"""
self.df = df
def lookup(self, key, origins, destinations, skim_indexes):
"""
Generally not called by the user - use __getitem__ instead
Parameters
----------
key : String
origins : Series
Identifies the origins of trips as indexes
destinations : Series
Identifies the destinations of trips as indexes
skim_index : Series
Identifies the indexes of the skims so that different skims can
be used for different rows
Returns
-------
impedances: pd.Series
A Series of impedances which are elements of the Skim object and
with the same index as df
"""
assert self.df is not None, "Call set_df first"
if self.offset:
origins = origins + self.offset
destinations = destinations + self.offset
return self.skims_data[key][origins, destinations, skim_indexes]
def __getitem__(self, key):
"""
Get an available skim object
Parameters
----------
key : hashable
The key (identifier) for this skim object
Returns
-------
skim: Skim
The skim object
"""
if self.omx:
# read off the disk on the fly
self._build_single_3d_matrix_from_disk(key)
origins = self.df[self.left_key].astype('int')
destinations = self.df[self.right_key].astype('int')
skim_indexes = self.df[self.skim_key].\
map(self.skim_keys_to_indexes[key]).astype('int')
ret = pd.Series(
self.lookup(key, origins, destinations, skim_indexes),
self.df.index
)
if self.omx:
# and now destroy
self._tear_down_single_3d_matrix(key)
return ret
"""
So these three function allow the use of reading skims directly from the OMX
file - ON DISK - rather than storing all your skims in memory. This
comes about well, first, because I run out of memory on my machine and on
Travis when reading all the skims into memory, and second, that with the
exception of the distance matrix, we really only use each skim 1-2 times
each and pretty much all in the mode choice model. And even though each
skim for 1454 zone system is only about 16MB, we have about 300 skim files
which can get large pretty fast (although I think it should be manageable
even still. So the job here is to build the 3D skims file, stacking the
skims for different time periods into a single 3D matrix (origin,
destination, and time period). Unfortunately this doesn't run as fast as I
thought it might - I actually think the stacking is pretty slow especially
so this code currently uses a shortcut to just read in DIST over and over
again (which is the only skim I have access to right now anyway). In the
Travis tests I actually build a random skim to use for this matrix anyway,
so that I don't have to check into git a 16MB file. Anyway, this should be
considered a work-in-progress and a "low memory" mode. It is not right now
working very well (I mean it works, just very slowly).
"""
def get_from_omx(self, key, v):
# treat this as a callback - override depending on how you store
# skims in the omx file - for now we just read the same one over and
# over for testing purposes and to reduce memory use
print "Getting from omx", key, v
# the only skim we have right now is distance
return self.omx['DIST']
def _build_single_3d_matrix_from_disk(self, key):
print "Building 3d matrix from disk for key = ", key
uniq = self.df[self.skim_key].unique()
if hasattr(self, 'mat'):
# being sneaky to make it go faster
self.skims_data[key] = self.mat
else:
self.skims_data[key] = np.dstack(
[self.get_from_omx(key, v) for v in uniq])
self.mat = self.skims_data[key]
self.skim_keys_to_indexes[key] = {i: v for i, v in
zip(uniq, range(len(uniq)))}
def _tear_down_single_3d_matrix(self, key):
del self.skims_data[key]
del self.skim_keys_to_indexes[key]
def set_omx(self, omx):
self.omx = omx
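# Minimal usage sketch for the 3D lookup (illustrative only; the keys, DataFrame and
# zone IDs are made up).  Skim entries are stored under (name, period) tuples via
# set_3d, and the period for each row is read from the skim_key column of the DataFrame.
def _example_skims3d_usage():
    skims = Skims()
    skims.set_3d('SOV', 'AM', Skim(np.zeros((3, 3)), offset=-1))
    skims.set_3d('SOV', 'PM', Skim(np.ones((3, 3)), offset=-1))
    df = pd.DataFrame({"TAZ": [1, 2], "TAZ_r": [2, 1], "TOD": ['AM', 'PM']})
    skims.set_df(df)
    stack = Skims3D(skims, skim_key="TOD", offset=-1)
    stack.set_df(df)
    return stack['SOV']                   # row 0 uses the 'AM' skim, row 1 the 'PM' skim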
| agpl-3.0 |
kwhitehall/climate | rcmet/src/main/python/rcmes/cli/do_rcmes_processing_sub.py | 1 | 42559 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#!/usr/local/bin/python
"""
PENDING DEPRECATION - YOU SHOULD INSTEAD USE THE rcmet.py within the bin dir
Module that is used to launch the rcmes processing from the rcmet_ui.py
script.
"""
import os, sys
import datetime
import numpy
import numpy.ma as ma
import toolkit.plots as plots
import mpl_toolkits.basemap.cm as cm
import matplotlib.pyplot as plt
import storage.db as db
import storage.files as files
import toolkit.process as process
import toolkit.metrics as metrics
def do_rcmes(settings, params, model, mask, options):
'''
Routine to perform full end-to-end RCMET processing.
i) retrieve observations from the database
ii) load in model data
iii) temporal regridding
iv) spatial regridding
v) area-averaging
vi) seasonal cycle compositing
vii) metric calculation
viii) plot production
Input:
5 dictionaries which contain a huge argument list with all of the user options
(which can be collected from the GUI)
settings - dictionary of rcmes run settings::
settings = {"cacheDir": string describing directory path,
"workDir": string describing directory path,
"fileList": string describing model file name + path }
params - dictionary of rcmes run parameters::
params = {"obsDatasetId": int( db dataset id ),
"obsParamId": int( db parameter id ),
"startTime": datetime object (needs to change to string + decode),
"endTime": datetime object (needs to change to string + decode),
"latMin": float,
"latMax": float,
"lonMin": float,
"lonMax": float }
model - dictionary of model parameters::
model = {"varName": string describing name of variable to evaluate (as written in model file),
"timeVariable": string describing name of time variable (as written in model file),
"latVariable": string describing name of latitude variable (as written in model file),
"lonVariable": string describing name of longitude variable (as written in model file) }
mask - dictionary of mask specific options (only used if options['mask']=True)::
mask = {"latMin": float,
"latMax": float,
"lonMin": float,
"lonMax": float}
options - dictionary full of different user supplied options::
options = {"regrid": str( 'obs' | 'model' | 'regular' ),
"timeRegrid": str( 'full' | 'annual' | 'monthly' | 'daily' ),
"seasonalCycle": Boolean,
"metric": str('bias'|'mae'|'acc'|'pdf'|'patcor'|'rms'|'diff'),
"plotTitle": string describing title to use in plot graphic,
"plotFilename": basename of file to use for plot graphic i.e. {plotFilename}.png,
"mask": Boolean,
"precip": Boolean }
Output: image files of plots + possibly data
'''
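    # Illustrative sketch of the expected argument structure (all values below are
    # made-up placeholders, not defaults):
    #   settings = {"cacheDir": "/tmp/rcmes/cache", "workDir": "/tmp/rcmes/work",
    #               "fileList": ["model_precip.nc"]}
    #   params   = {"obsDatasetId": 3, "obsParamId": 36,
    #               "startTime": datetime.datetime(2000, 1, 1),
    #               "endTime": datetime.datetime(2002, 12, 31),
    #               "latMin": 22.0, "latMax": 45.0, "lonMin": -125.0, "lonMax": -66.0}
    #   model    = {"varName": "prec", "timeVariable": "time",
    #               "latVariable": "lat", "lonVariable": "lon"}
    #   options  = {"regrid": "obs", "timeRegrid": "monthly", "seasonalCycle": False,
    #               "metric": "bias", "plotTitle": "Model bias", "plotFilename": "bias",
    #               "mask": False, "precip": True}
    #   do_rcmes(settings, params, model, mask={}, options=options)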
# check the number of model data files
if len(settings['fileList']) < 1: # no input data file
print 'No input model data file. EXIT'
sys.exit()
# assign parameters that must be preserved throughout the process
if options['mask'] == True:
options['seasonalCycle'] = True
###########################################################################
# Part 1: retrieve observation data from the database
# NB. automatically uses local cache if already retrieved.
###########################################################################
rcmedData = getDataFromRCMED( params, settings, options )
###########################################################################
# Part 2: load in model data from file(s)
###########################################################################
modelData = getDataFromModel( model, settings )
###########################################################################
# Deal with some precipitation specific options
# i.e. adjust units of model data and set plot color bars suitable for precip
###########################################################################
    # AG 06/12/2013: Need to revise how we select colormaps in the future
colorbar = None
if options['precip'] == True:
modelData['data'] = modelData['data']*86400. # convert from kgm-2s-1 into mm/day
colorbar = cm.s3pcpn
# set color bar suitable for MODIS cloud data
if params['obsParamId'] == 31:
colorbar = plt.cm.gist_gray
diffcolorbar = cm.GMT_polar
##################################################################################################################
# Extract sub-selection of model data for required time range.
# e.g. a single model file may contain data for 20 years,
# but the user may have selected to only analyse data between 2003 and 2004.
##################################################################################################################
# Make list of indices where modelData['times'] are between params['startTime'] and params['endTime']
modelTimeOverlap = numpy.logical_and((numpy.array(modelData['times'])>=params['startTime']),
(numpy.array(modelData['times'])<=params['endTime']))
# Make subset of modelData['times'] using full list of times and indices calculated above
modelData['times'] = list(numpy.array(modelData['times'])[modelTimeOverlap])
# Make subset of modelData['data'] using full model data and indices calculated above
modelData['data'] = modelData['data'][modelTimeOverlap, :, :]
##################################################################################################################
# Part 3: Temporal regridding
# i.e. model data may be monthly, and observation data may be daily.
# We need to compare like with like so the User Interface asks what time unit the user wants to work with
# e.g. the user may select that they would like to regrid everything to 'monthly' data
# in which case, the daily observational data will be averaged onto monthly data
# so that it can be compared directly with the monthly model data.
##################################################################################################################
print 'Temporal Regridding Started'
if(options['timeRegrid']):
# Run both obs and model data through temporal regridding routine.
# NB. if regridding not required (e.g. monthly time units selected and model data is already monthly),
# then subroutine detects this and returns data untouched.
rcmedData['data'], newObsTimes = process.calc_average_on_new_time_unit(rcmedData['data'],
rcmedData['times'],
unit=options['timeRegrid'])
modelData['data'], newModelTimes = process.calc_average_on_new_time_unit(modelData['data'],
modelData['times'],
unit=options['timeRegrid'])
# Set a new 'times' list which describes the common times used for both model and obs after the regrid.
if newObsTimes == newModelTimes:
times = newObsTimes
###########################################################################
# Catch situations where after temporal regridding the times in model and obs don't match.
# If this occurs, take subset of data from times common to both model and obs only.
# e.g. imagine you are looking at monthly model data,
# the model times are set to the 15th of each month.
# + you are comparing against daily obs data.
# If you set the start date as Jan 1st, 1995 and the end date as Jan 1st, 1996
    # - then the system will load all model data in this range, with the last model date being Dec 15th, 1995,
    #   whereas loading the daily obs data from the database will give a last data item of Jan 1st, 1996.
# If you then do temporal regridding of the obs data from daily -> monthly (to match the model)
# Then there will be data for Jan 96 in the obs, but only up to Dec 95 for the model.
# This section of code deals with this situation by only looking at data
# from the common times between model and obs after temporal regridding.
###########################################################################
if newObsTimes != newModelTimes:
print 'Warning: after temporal regridding, times from observations and model do not match'
print 'Check if this is unexpected.'
print 'Proceeding with data from times common in both model and obs.'
# Create empty lists ready to store data
times = []
tempModelData = []
tempObsData = []
# Loop through each time that is common in both model and obs
for commonTime in numpy.intersect1d(newObsTimes, newModelTimes):
# build up lists of times, and model and obs data for each common time
# NB. use lists for data for convenience (then convert to masked arrays at the end)
times.append(newObsTimes[numpy.where(numpy.array(newObsTimes) == commonTime)[0][0]])
tempModelData.append(modelData['data'][numpy.where(numpy.array(newModelTimes) == commonTime)[0][0], :, :])
tempObsData.append(rcmedData['data'][numpy.where(numpy.array(newObsTimes) == commonTime)[0][0], :, :])
# Convert data arrays from list back into full 3d arrays.
modelData['data'] = ma.array(tempModelData)
rcmedData['data'] = ma.array(tempObsData)
# Reset all time lists so representative of the data actually used.
newObsTimes = times
newModelTimes = times
rcmedData['times'] = times
modelData['times'] = times
##################################################################################################################
# Part 4: spatial regridding
# The model and obs are rarely on the same grid.
# To compare the two, you need them to be on the same grid.
# The User Interface asked the user if they'd like to regrid everything to the model grid or the obs grid.
    # Alternatively, they could choose to regrid both model and obs onto a third regular lat/lon grid as defined
# by parameters that they enter.
#
# NB. from this point on in the code, the 'lats' and 'lons' arrays are common to
# both rcmedData['data'] and modelData['data'].
##################################################################################################################
##################################################################################################################
# either i) Regrid obs data to model grid.
##################################################################################################################
if options['regrid'] == 'model':
# User chose to regrid observations to the model grid
modelData['data'], rcmedData['data'], lats, lons = process.regrid_wrapper('0', rcmedData['data'],
rcmedData['lats'],
rcmedData['lons'],
modelData['data'],
modelData['lats'],
modelData['lons'])
##################################################################################################################
# or ii) Regrid model data to obs grid.
##################################################################################################################
if options['regrid'] == 'obs':
# User chose to regrid model data to the observation grid
modelData['data'], rcmedData['data'], lats, lons = process.regrid_wrapper('1', rcmedData['data'],
rcmedData['lats'],
rcmedData['lons'],
modelData['data'],
modelData['lats'],
modelData['lons'])
##################################################################################################################
# or iii) Regrid both model data and obs data to new regular lat/lon grid.
##################################################################################################################
if options['regrid'] == 'regular':
# User chose to regrid both model and obs data onto a newly defined regular lat/lon grid
# Construct lats, lons from grid parameters
# Create 1d lat and lon arrays
# AG 06/21/2013: These variables are undefined, where are they generated from?
lat = numpy.arange(nLats)*dLat+Lat0
lon = numpy.arange(nLons)*dLon+Lon0
# Combine 1d lat and lon arrays into 2d arrays of lats and lons
lons, lats = numpy.meshgrid(lon, lat)
###########################################################################################################
# Regrid model data for every time
# NB. store new data in a list and convert back to an array at the end.
###########################################################################################################
tmpModelData = []
timeCount = modelData['data'].shape[0]
for t in numpy.arange(timeCount):
tmpModelData.append(process.do_regrid(modelData['data'][t, :, :],
modelData['lats'][:, :],
modelData['lons'][:, :],
rcmedData['lats'][:, :],
rcmedData['lons'][:, :]))
# Convert list back into a masked array
modelData['data'] = ma.array(tmpModelData)
###########################################################################################################
# Regrid obs data for every time
# NB. store new data in a list and convert back to an array at the end.
###########################################################################################################
tempObsData = []
timeCount = rcmedData['data'].shape[0]
for t in numpy.arange(timeCount):
tempObsData.append(process.do_regrid(rcmedData['data'][t, :, :],
rcmedData['lats'][:, :],
rcmedData['lons'][:, :],
modelData['lats'][:, :], modelData['lons'][:, :]))
# Convert list back into a masked array
rcmedData['data'] = ma.array(tempObsData)
##################################################################################################################
# (Optional) Part 5: area-averaging
#
# RCMET has the ability to either calculate metrics at every grid point,
# or to calculate metrics for quantities area-averaged over a defined (masked) region.
#
# If the user has selected to perform area-averaging,
# then they have also selected how they want to define
# the area to average over.
# The options were:
# -define masked region using regular lat/lon bounding box parameters
# -read in masked region from file
#
# either i) Load in the mask file (if required)
# or ii) Create the mask using latlonbox
# then iii) Do the area-averaging
#
###############################################################################################################
if options['mask'] == True: # i.e. define regular lat/lon box for area-averaging
print 'Using Latitude/Longitude Mask for Area Averaging'
###############################################################################################################
# Define mask using regular lat/lon box specified by users (i.e. ignore regions where mask = True)
###############################################################################################################
mask = numpy.logical_or(numpy.logical_or(lats<=mask['latMin'], lats>=mask['latMax']),
numpy.logical_or(lons<=mask['lonMin'], lons>=mask['lonMax']))
        ###############################################################################################################
# Calculate area-weighted averages within this region and store in new lists
###############################################################################################################
modelStore = []
timeCount = modelData['data'].shape[0]
for t in numpy.arange(timeCount):
modelStore.append(process.calc_area_mean(modelData['data'][t, :, :], lats, lons, mymask=mask))
obsStore = []
timeCount = rcmedData['data'].shape[0]
for t in numpy.arange(timeCount):
obsStore.append(process.calc_area_mean(rcmedData['data'][t, :, :], lats, lons, mymask=mask))
###############################################################################################################
# Now overwrite data arrays with the area-averaged values
###############################################################################################################
modelData['data'] = ma.array(modelStore)
rcmedData['data'] = ma.array(obsStore)
###############################################################################################################
# Free-up some memory by overwriting big variables
###############################################################################################################
obsStore = 0
modelStore = 0
##############################################################################################################
# NB. if area-averaging has been performed then the dimensions of the data arrays will have changed from 3D to 1D
# i.e. only one value per time.
##############################################################################################################
##############################################################################################################
# (Optional) Part 6: seasonal cycle compositing
#
# RCMET has the ability to calculate seasonal average values from a long time series of data.
#
# e.g. for monthly data going from Jan 1980 - Dec 2010
# If the user selects to do seasonal cycle compositing,
    # this section calculates the mean of all Januarys, mean of all Februarys, mean of all Marches, etc.
# -result has 12 times.
#
    # NB. this works with incoming 3D data or 1D data (e.g. time series after area-averaging).
#
# If no area-averaging has been performed in Section 5,
# then the incoming data is 3D, and the outgoing data will also be 3D,
# but with the number of times reduced to 12
# i.e. you will get 12 map plots each one showing the average values for a month. (all Jans, all Febs etc)
#
#
# If area-averaging has been performed in Section 5,
# then the incoming data is 1D, and the outgoing data will also be 1D,
# but with the number of times reduced to 12
# i.e. you will get a time series of 12 data points
# each one showing the average values for a month. (all Jans, all Febs etc).
#
##################################################################################################################
if options['seasonalCycle'] == True:
print 'Compositing data to calculate seasonal cycle'
modelData['data'] = metrics.calcAnnualCycleMeans(modelData['data'])
rcmedData['data'] = metrics.calcAnnualCycleMeans(rcmedData['data'])
##################################################################################################################
# Part 7: metric calculation
# Calculate performance metrics comparing rcmedData['data'] and modelData['data'].
# All output is stored in metricData regardless of what metric was calculated.
#
# NB. the dimensions of metricData will vary depending on the dimensions of the incoming data
# *and* on the type of metric being calculated.
#
# e.g. bias between incoming 1D model and 1D obs data (after area-averaging) will be a single number.
# bias between incoming 3D model and 3D obs data will be 2D, i.e. a map of mean bias.
# correlation coefficient between incoming 3D model and 3D obs data will be 1D time series.
#
##################################################################################################################
if options['metric'] == 'bias':
metricData = metrics.calcBias(modelData['data'], rcmedData['data'])
metricTitle = 'Bias'
if options['metric'] == 'mae':
metricData = metrics.calcBiasAveragedOverTime(modelData['data'], rcmedData['data'], 'abs')
metricTitle = 'Mean Absolute Error'
if options['metric'] == 'rms':
metricData = metrics.calcRootMeanSquaredDifferenceAveragedOverTime(modelData['data'], rcmedData['data'])
metricTitle = 'RMS error'
#if options['metric'] == 'patcor':
#metricData = metrics.calc_pat_cor2D(modelData['data'], rcmedData['data'])
#metricTitle = 'Pattern Correlation'
if options['metric'] == 'pdf':
metricData = metrics.calcPdf(modelData['data'], rcmedData['data'])
metricTitle = 'Probability Distribution Function'
if options['metric'] == 'coe':
metricData = metrics.calcNashSutcliff(modelData['data'], rcmedData['data'])
metricTitle = 'Coefficient of Efficiency'
if options['metric'] == 'stddev':
metricData = metrics.calcTemporalStdev(modelData['data'])
data2 = metrics.calcTemporalStdev(rcmedData['data'])
metricTitle = 'Standard Deviation'
##################################################################################################################
# Part 8: Plot production
#
# Produce plots of metrics and obs, model data.
# Type of plot produced depends on dimensions of incoming data.
# e.g. 1D data is plotted as a time series.
# 2D data is plotted as a map.
# 3D data is plotted as a sequence of maps.
#
##################################################################################################################
##################################################################################################################
# 1 dimensional data, e.g. Time series plots
##################################################################################################################
if metricData.ndim == 1:
print 'Producing time series plots ****'
print metricData
yearLabels = True
# mytitle = 'Area-average model v obs'
################################################################################################################
# If producing seasonal cycle plots, don't want to put year labels on the time series plots.
################################################################################################################
if options['seasonalCycle'] == True:
yearLabels = False
mytitle = 'Annual cycle: area-average model v obs'
# Create a list of datetimes to represent the annual cycle, one per month.
times = []
for m in xrange(12):
times.append(datetime.datetime(2000, m+1, 1, 0, 0, 0, 0))
###############################################################################################
# Special case for pattern correlation plots. TODO: think of a cleaner way of doing this.
# Only produce these plots if the metric is NOT pattern correlation.
###############################################################################################
# TODO - Clean up this if statement. We can use a list of values then ask if not in LIST...
#KDW: change the if statement to if else to accommodate the 2D timeseries plots
if (options['metric'] != 'patcor')&(options['metric'] != 'acc')&(options['metric'] != 'nacc')&(options['metric'] != 'coe')&(options['metric'] != 'pdf'):
# for anomaly and pattern correlation,
# can't plot time series of model, obs as these are 3d fields
# ^^ This is the reason modelData['data'] has been swapped for metricData in
# the following function
# TODO: think of a cleaner way of dealing with this.
###########################################################################################
# Produce the time series plots with two lines: obs and model
###########################################################################################
print 'two line timeseries'
# mytitle = options['plotTitle']
mytitle = 'Area-average model v obs'
if options['plotTitle'] == 'default':
mytitle = metricTitle+' model & obs'
#plots.draw_time_series_plot(modelData['data'],times,options['plotFilename']+'both',
# settings['workDir'],data2=rcmedData['data'],mytitle=mytitle,
# ytitle='Y',xtitle='time',
# year_labels=yearLabels)
plots.draw_time_series_plot(metricData, times, options['plotFilename']+'both',
settings['workDir'], data2, mytitle=mytitle,
ytitle='Y', xtitle='time',
year_labels=yearLabels)
else:
###############################################################################################
# Produce the metric time series plot (one line only)
###############################################################################################
mytitle = options['plotTitle']
if options['plotTitle'] == 'default':
mytitle = metricTitle+' model v obs'
print 'one line timeseries'
plots.draw_time_series_plot(metricData, times, options['plotFilename'],
settings['workDir'], mytitle=mytitle, ytitle='Y', xtitle='time',
year_labels=yearLabels)
###############################################################################################
# 2 dimensional data, e.g. Maps
###############################################################################################
if metricData.ndim == 2:
###########################################################################################
# Calculate color bar ranges for data such that same range is used in obs and model plots
# for like-with-like comparison.
###########################################################################################
mymax = max(rcmedData['data'].mean(axis=0).max(), modelData['data'].mean(axis=0).max())
mymin = min(rcmedData['data'].mean(axis=0).min(), modelData['data'].mean(axis=0).min())
###########################################################################################
# Time title labels need their format adjusting depending on the temporal regridding used,
# e.g. if data are averaged to monthly,
# then want to write 'Jan 2002', 'Feb 2002', etc instead of 'Jan 1st, 2002', 'Feb 1st, 2002'
#
# Also, if doing seasonal cycle compositing
# then want to write 'Jan','Feb','Mar' instead of 'Jan 2002','Feb 2002','Mar 2002' etc
# as data are representative of all Jans, all Febs etc.
###########################################################################################
if(options['timeRegrid'] == 'daily'):
timeFormat = "%b %d, %Y"
if(options['timeRegrid'] == 'monthly'):
timeFormat = "%b %Y"
if(options['timeRegrid'] == 'annual'):
timeFormat = "%Y"
if(options['timeRegrid'] == 'full'):
timeFormat = "%b %d, %Y"
###########################################################################################
# Special case: when plotting bias data, we also like to plot the mean obs and mean model data.
# In this case, we need to calculate new time mean values for both obs and model.
# When doing this time averaging, we also need to deal with missing data appropriately.
#
        # Classify missing data resulting from multiple times (using a threshold data requirement)
# i.e. if the working time unit is monthly data, and we are dealing with multiple months of data
# then when we show mean of several months, we need to decide what threshold of missing data we tolerate
# before classifying a data point as missing data.
###########################################################################################
###########################################################################################
# Calculate time means of model and obs data
###########################################################################################
modelDataMean = modelData['data'].mean(axis=0)
obsDataMean = rcmedData['data'].mean(axis=0)
###########################################################################################
# Calculate missing data masks using tolerance threshold of missing data going into calculations
###########################################################################################
obsDataMask = process.create_mask_using_threshold(rcmedData['data'], threshold=0.75)
modelDataMask = process.create_mask_using_threshold(modelData['data'], threshold=0.75)
###########################################################################################
# Combine data and masks into masked arrays suitable for plotting.
###########################################################################################
modelDataMean = ma.masked_array(modelDataMean, modelDataMask)
obsDataMean = ma.masked_array(obsDataMean, obsDataMask)
###########################################################################################
# Plot model data
###########################################################################################
mytitle = 'Model data: mean between %s and %s' % ( modelData['times'][0].strftime(timeFormat),
modelData['times'][-1].strftime(timeFormat) )
        myfname = os.path.join(settings['workDir'], options['plotFilename']+'model')
plots.draw_cntr_map_single(modelDataMean, lats, lons, mymin, mymax, mytitle, myfname, cMap = colorbar)
###########################################################################################
# Plot obs data
###########################################################################################
mytitle = 'Obs data: mean between %s and %s' % ( rcmedData['times'][0].strftime(timeFormat),
rcmedData['times'][-1].strftime(timeFormat) )
        myfname = os.path.join(settings['workDir'], options['plotFilename']+'obs')
plots.draw_cntr_map_single(obsDataMean, lats, lons, mymin, mymax, mytitle, myfname, cMap = colorbar)
###########################################################################################
# Plot metric
###########################################################################################
mymax = metricData.max()
mymin = metricData.min()
mytitle = options['plotTitle']
if options['plotTitle'] == 'default':
mytitle = metricTitle+' model v obs %s to %s' % ( rcmedData['times'][0].strftime(timeFormat),
rcmedData['times'][-1].strftime(timeFormat) )
        myfname = os.path.join(settings['workDir'], options['plotFilename'])
plots.draw_cntr_map_single(metricData, lats, lons, mymin, mymax, mytitle, myfname, cMap = diffcolorbar)
###############################################################################################
# 3 dimensional data, e.g. sequence of maps
###############################################################################################
if metricData.ndim == 3:
print 'Generating series of map plots, each for a different time.'
for t in numpy.arange(rcmedData['data'].shape[0]):
#######################################################################################
# Calculate color bar ranges for data such that same range is used in obs and model plots
# for like-with-like comparison.
#######################################################################################
colorRangeMax = max(rcmedData['data'][t, :, :].max(), modelData['data'][t, :, :].max())
colorRangeMin = min(rcmedData['data'][t, :, :].min(), modelData['data'][t, :, :].min())
# Setup the timeTitle
timeSlice = times[t]
timeTitle = createTimeTitle( options, timeSlice, rcmedData, modelData )
#######################################################################################
# Plot model data
#######################################################################################
mytitle = 'Model data: mean '+timeTitle
myfname = os.path.join(settings['workDir'], options['plotFilename']+'model'+str(t))
plots.draw_cntr_map_single(modelData['data'][t, :, :], lats, lons, colorRangeMin, colorRangeMax,
mytitle, myfname, cMap = colorbar)
#######################################################################################
# Plot obs data
#######################################################################################
mytitle = 'Obs data: mean '+timeTitle
myfname = os.path.join(settings['workDir'], options['plotFilename']+'obs'+str(t))
plots.draw_cntr_map_single(rcmedData['data'][t, :, :], lats, lons, colorRangeMin, colorRangeMax,
mytitle, myfname, cMap = colorbar)
#######################################################################################
# Plot metric
#######################################################################################
mytitle = options['plotTitle']
myfname = os.path.join(settings['workDir'], options['plotFilename']+str(t))
if options['plotTitle'] == 'default':
mytitle = metricTitle +' model v obs : '+timeTitle
colorRangeMax = metricData.max()
colorRangeMin = metricData.min()
plots.draw_cntr_map_single(metricData[t, :, :], lats, lons, colorRangeMin, colorRangeMax,
mytitle, myfname, cMap = diffcolorbar)
def getDataFromRCMED( params, settings, options ):
"""
This function takes in the params, settings, and options dictionaries and will return an rcmedData dictionary.
return:
rcmedData = {"lats": 1-d numpy array of latitudes,
"lons": 1-d numpy array of longitudes,
"levels": 1-d numpy array of height/pressure levels (surface based data will have length == 1),
"times": list of python datetime objects,
"data": masked numpy arrays of data values}
"""
rcmedData = {}
obsLats, obsLons, obsLevs, obsTimes, obsData = db.extractData(params['obsDatasetId'],
params['obsParamId'],
params['latMin'],
params['latMax'],
params['lonMin'],
params['lonMax'],
params['startTime'],
params['endTime'],
settings['cacheDir'],
options['timeRegrid'])
rcmedData['lats'] = obsLats
rcmedData['lons'] = obsLons
rcmedData['levels'] = obsLevs
rcmedData['times'] = obsTimes
rcmedData['data'] = obsData
return rcmedData
def getDataFromModel( model, settings ):
"""
This function takes in the model and settings dictionaries and will return a model data dictionary.
return:
model = {"lats": 1-d numpy array of latitudes,
"lons": 1-d numpy array of longitudes,
"times": list of python datetime objects,
"data": numpy array containing data from all files}
"""
model = files.read_data_from_file_list(settings['fileList'],
model['varName'],
model['timeVariable'],
model['latVariable'],
model['lonVariable'])
return model
##################################################################################################################
# Processing complete
##################################################################################################################
def createTimeTitle( options, timeSlice, rcmedData, modelData ):
"""
Function that takes in the options dictionary and a specific timeSlice.
Return: string timeTitle properly formatted based on the 'timeRegrid' and 'seasonalCycle' options value.
Time title labels need their format adjusting depending on the temporal regridding used
e.g. if data are averaged to monthly, then want to write 'Jan 2002',
'Feb 2002', etc instead of 'Jan 1st, 2002', 'Feb 1st, 2002'
Also, if doing seasonal cycle compositing then want to write 'Jan','Feb',
'Mar' instead of 'Jan 2002', 'Feb 2002','Mar 2002' etc as data are
representative of all Jans, all Febs etc.
"""
if(options['timeRegrid'] == 'daily'):
timeTitle = timeSlice.strftime("%b %d, %Y")
if options['seasonalCycle'] == True:
timeTitle = timeSlice.strftime("%b %d (all years)")
if(options['timeRegrid'] == 'monthly'):
timeTitle = timeSlice.strftime("%b %Y")
if options['seasonalCycle'] == True:
timeTitle = timeSlice.strftime("%b (all years)")
if(options['timeRegrid'] == 'annual'):
timeTitle = timeSlice.strftime("%Y")
if(options['timeRegrid'] == 'full'):
minTime = min(min(rcmedData['times']), min(modelData['times']))
maxTime = max(max(rcmedData['times']), max(modelData['times']))
timeTitle = minTime.strftime("%b %d, %Y")+' to '+maxTime.strftime("%b %d, %Y")
return timeTitle
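# Illustrative sketch only (not part of the original module): shows the kind of
# strings createTimeTitle produces for the 'monthly' and 'full' options.
# The datetimes and option values below are hypothetical.
def _example_create_time_title():
    data = {'times': [datetime.datetime(2002, 1, 1), datetime.datetime(2004, 12, 1)]}
    time_slice = datetime.datetime(2002, 6, 1)
    monthly = createTimeTitle({'timeRegrid': 'monthly', 'seasonalCycle': False},
                              time_slice, data, data)  # -> 'Jun 2002'
    full = createTimeTitle({'timeRegrid': 'full', 'seasonalCycle': False},
                           time_slice, data, data)  # -> 'Jan 01, 2002 to Dec 01, 2004'
    return monthly, full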
| apache-2.0 |
thientu/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
automl/SpySMAC | cave/analyzer/feature_analysis/feature_importance.py | 1 | 2224 | import os
from pandas import DataFrame
from cave.analyzer.base_analyzer import BaseAnalyzer
from cave.utils.helpers import check_for_features
from cave.utils.hpbandster_helpers import format_budgets
class FeatureImportance(BaseAnalyzer):
def __init__(self,
runscontainer,
):
super().__init__(runscontainer)
check_for_features(runscontainer.scenario)
formatted_budgets = format_budgets(self.runscontainer.get_budgets())
for budget, run in zip(self.runscontainer.get_budgets(),
self.runscontainer.get_aggregated(keep_budgets=True, keep_folders=False)):
feat_imp, plots = self.feature_importance(
pimp=run.pimp,
output_dir=run.output_dir,
)
self.result[formatted_budgets[budget]] = plots
# Add to run so other analysis-methods can use the information
run.share_information['feature_importance'] = feat_imp
def get_name(self):
return "Feature Importance"
def feature_importance(self, pimp, output_dir):
self.logger.info("... plotting feature importance")
old_values = (pimp.forwardsel_feat_imp, pimp._parameters_to_evaluate, pimp.forwardsel_cv)
pimp.forwardsel_feat_imp = True
pimp._parameters_to_evaluate = -1
pimp.forwardsel_cv = False
dir_ = os.path.join(output_dir, 'feature_plots/importance')
os.makedirs(dir_, exist_ok=True)
res = pimp.evaluate_scenario(['forward-selection'], dir_)
feat_importance = res[0]['forward-selection']['imp']
plots = [os.path.join(dir_, 'forward-selection-barplot.png'),
os.path.join(dir_, 'forward-selection-chng.png')]
# Restore values
pimp.forwardsel_feat_imp, pimp._parameters_to_evaluate, pimp.forwardsel_cv = old_values
table = DataFrame(data=list(feat_importance.values()), index=list(feat_importance.keys()), columns=["Error"])
table = table.to_html()
result = {'Table': {'table': table}}
for p in plots:
result[os.path.splitext(os.path.basename(p))[0]] = {'figure': p}
return feat_importance, result
| bsd-3-clause |
mfitzp/padua | padua/filters.py | 1 | 7556 | import numpy as np
import pandas as pd
def remove_rows_matching(df, column, match):
"""
Return a ``DataFrame`` with rows where `column` values match `match` are removed.
The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared
to `match`, and those rows that match are removed from the DataFrame.
:param df: Pandas ``DataFrame``
:param column: Column indexer
:param match: ``str`` match target
:return: Pandas ``DataFrame`` filtered
"""
df = df.copy()
mask = df[column].values != match
return df.iloc[mask, :]
def remove_rows_containing(df, column, match):
"""
Return a ``DataFrame`` with rows where `column` values containing `match` are removed.
The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared
to `match`, and those rows that contain it are removed from the DataFrame.
:param df: Pandas ``DataFrame``
:param column: Column indexer
:param match: ``str`` match target
:return: Pandas ``DataFrame`` filtered
"""
df = df.copy()
mask = [match not in str(v) for v in df[column].values]
return df.iloc[mask, :]
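# Illustrative sketch (not part of the original padua API): the DataFrame below
# is made up; it shows the difference between exact matching and substring
# matching when removing rows.
def _example_remove_rows():
    df = pd.DataFrame({'Reverse': ['', '+', ''],
                       'Proteins': ['P12345', 'REV__P12345', 'CON__Q67890']})
    exact = remove_rows_matching(df, 'Reverse', '+')  # drops the reverse hit only
    partial = remove_rows_containing(df, 'Proteins', 'CON__')  # drops the contaminant row
    return exact, partial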
def remove_reverse(df):
"""
Remove rows with a + in the 'Reverse' column.
Return a ``DataFrame`` where rows where there is a "+" in the column 'Reverse' are removed.
Filters data to remove peptides matched as reverse.
:param df: Pandas ``DataFrame``
:return: filtered Pandas ``DataFrame``
"""
return remove_rows_containing(df, 'Reverse', '+')
def remove_contaminants(df):
"""
Remove rows with a + in the 'Contaminants' column
Return a ``DataFrame`` where rows where there is a "+" in the column 'Contaminants' are removed.
Filters data to remove peptides matched as reverse.
:param df: Pandas ``DataFrame``
:return: filtered Pandas ``DataFrame``
"""
colname = (df.columns & ['Contaminant','Potential contaminant'])[0]
return remove_rows_matching(df, colname, '+')
def remove_only_identified_by_site(df):
"""
Remove rows with a + in the 'Only identified by site' column
Return a ``DataFrame`` where rows where there is a "+" in the column 'Only identified by site' are removed.
Filters data to remove peptides matched as reverse.
:param df: Pandas ``DataFrame``
:return: filtered Pandas ``DataFrame``
"""
return remove_rows_matching(df, 'Only identified by site', '+')
def filter_localization_probability(df, threshold=0.75):
"""
Remove rows with a localization probability below 0.75
Return a ``DataFrame`` where the rows with a value < `threshold` (default 0.75) in column 'Localization prob' are removed.
Filters data to remove poorly localized peptides (non Class-I by default).
:param df: Pandas ``DataFrame``
:param threshold: Cut-off below which rows are discarded (default 0.75)
:return: Pandas ``DataFrame``
"""
df = df.copy()
localization_probability_mask = df['Localization prob'].values >= threshold
return df.iloc[localization_probability_mask, :]
def minimum_valid_values_in_any_group(df, levels=None, n=1, invalid=np.nan):
"""
Filter ``DataFrame`` by at least n valid values in at least one group.
Taking a Pandas ``DataFrame`` with a ``MultiIndex`` column index, filters rows to remove
rows where there are less than `n` valid values per group. Groups are defined by the `levels` parameter indexing
into the column index. For example, a ``MultiIndex`` with top and second level Group (A,B,C) and Replicate (1,2,3) using
``levels=[0,1]`` would filter on `n` valid values per replicate. Alternatively, ``levels=[0]`` would filter on `n`
valid values at the Group level only, e.g. A, B or C.
By default valid values are determined by `np.nan`. However, alternatives can be supplied via `invalid`.
:param df: Pandas ``DataFrame``
:param levels: ``list`` of ``int`` specifying levels of column ``MultiIndex`` to group by
:param n: ``int`` minimum number of valid values threshold
:param invalid: matching invalid value
:return: filtered Pandas ``DataFrame``
"""
df = df.copy()
if levels is None:
if 'Group' in df.columns.names:
levels = [df.columns.names.index('Group')]
    # Keep rows that have at least n valid values in at least one group
if invalid is np.nan:
dfx = ~np.isnan(df)
else:
dfx = df != invalid
dfc = dfx.astype(int).sum(axis=1, level=levels)
dfm = dfc.max(axis=1) >= n
mask = dfm.values
return df.iloc[mask, :]
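# Illustrative sketch (not part of the original padua API): builds a toy
# intensity table with a Group/Replicate column MultiIndex and keeps only rows
# holding at least two valid values in at least one group.
def _example_minimum_valid_values():
    toy_columns = pd.MultiIndex.from_product([['A', 'B'], [1, 2, 3]],
                                             names=['Group', 'Replicate'])
    df = pd.DataFrame(np.random.rand(4, 6), columns=toy_columns)
    df.iloc[0, :] = np.nan   # no valid values in any group -> dropped
    df.iloc[1, 1:] = np.nan  # only one valid value per group -> dropped
    return minimum_valid_values_in_any_group(df, levels=[0], n=2)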
def search(df, match, columns=['Proteins','Protein names','Gene names']):
"""
Search for a given string in a set of columns in a processed ``DataFrame``.
Returns a filtered ``DataFrame`` where `match` is contained in one of the `columns`.
:param df: Pandas ``DataFrame``
:param match: ``str`` to search for in columns
:param columns: ``list`` of ``str`` to search for match
:return: filtered Pandas ``DataFrame``
"""
df = df.copy()
dft = df.reset_index()
mask = np.zeros((dft.shape[0],), dtype=bool)
    for i in columns:
if i in dft.columns:
mask = mask | np.array([match in str(l) for l in dft[i].values])
return df.iloc[mask]
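# Illustrative sketch (not part of the original padua API): the gene names and
# values below are made up; search() keeps rows whose listed columns contain
# the query string.
def _example_search():
    df = pd.DataFrame({'Gene names': ['EGFR', 'MAPK1;MAPK3', 'TP53'],
                       'Intensity': [1.0, 2.0, 3.0]})
    return search(df, 'MAPK', columns=['Gene names'])  # keeps only the middle row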
def filter_exclude(df, s):
"""
Filter dataframe to exclude matching columns, based on search for "s"
:param s: string to search for, exclude matching columns
"""
keep = ~np.array( [s in c for c in df.columns.values] )
return df.iloc[:, keep]
def filter_select_columns_intensity(df, prefix, columns):
"""
Filter dataframe to include specified columns, retaining any Intensity columns.
"""
# Note: I use %s.+ (not %s.*) so it forces a match with the prefix string, ONLY if it is followed by something.
return df.filter(regex='^(%s.+|%s)$' % (prefix, '|'.join(columns)) )
def filter_select_columns_ratio(df, columns):
"""
Filter dataframe to include specified columns, retaining Ratio columns.
"""
return df.filter(regex='^(Ratio ./. normalized.*|%s)$' % ('|'.join(columns)) )
def filter_intensity(df, label="", with_multiplicity=False):
"""
Filter to include only the Intensity values with optional specified label, excluding other
Intensity measurements, but retaining all other columns.
"""
    # when with_multiplicity is requested, require the __<n> multiplicity suffix via the label pattern
    label += r".*__\d" if with_multiplicity else ""
    dft = df.filter(regex=r"^(?!Intensity).*$")
    dfi = df.filter(regex=r'^(.*Intensity.*%s)$' % label)
return pd.concat([dft,dfi], axis=1)
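# Illustrative sketch (not part of the original padua API): the column names
# below mimic a MaxQuant sites table; with_multiplicity=True keeps only the
# per-multiplicity intensity columns (suffix __1, __2, ...) for the chosen label.
def _example_filter_intensity():
    df = pd.DataFrame(columns=['Proteins', 'Intensity Control',
                               'Intensity Control__1', 'Intensity Treated__1'])
    return filter_intensity(df, label='Control', with_multiplicity=True)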
def filter_intensity_lfq(df, label="", with_multiplicity=False):
"""
    Filter to include only the LFQ Intensity values with optional specified label, excluding other
    LFQ Intensity measurements, but retaining all other columns.
"""
    # when with_multiplicity is requested, require the __<n> multiplicity suffix via the label pattern
    label += r".*__\d" if with_multiplicity else ""
    dft = df.filter(regex=r"^(?!LFQ Intensity).*$")
    dfi = df.filter(regex=r'^(.*LFQ Intensity.*%s)$' % label)
return pd.concat([dft,dfi], axis=1)
def filter_ratio(df, label="", with_multiplicity=False):
"""
Filter to include only the Ratio values with optional specified label, excluding other
Intensity measurements, but retaining all other columns.
"""
label += ".*__\d" if with_multiplicity else ""
dft = df.filter(regex="^(?!Ratio).*$")
dfr = df.filter(regex='^(.*Ratio.*%s)$' % label)
return pd.concat([dft,dfr], axis=1)
| bsd-2-clause |
jaidevd/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
ifuding/Kaggle | SVPC/Code/philly/RankGauss.py | 1 | 3627 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
import os
import numpy as np
import pandas as pd
import scipy.stats as ss
# import dask.dataframe as dd
# from dask.multiprocessing import get
import concurrent.futures
def rank_INT(series, c=3.0/8, stochastic=True):
""" Perform rank-based inverse normal transformation on pandas series.
If stochastic is True ties are given rank randomly, otherwise ties will
share the same value. NaN values are ignored.
Args:
param1 (pandas.Series): Series of values to transform
        param2 (Optional[float]): Constant parameter (Blom's constant)
param3 (Optional[bool]): Whether to randomise rank of ties
Returns:
pandas.Series
"""
# Check input
assert(isinstance(series, pd.Series))
assert(isinstance(c, float))
assert(isinstance(stochastic, bool))
# Set seed
np.random.seed(123)
# Take original series indexes
orig_idx = series.index
# Drop NaNs
# series = series.loc[~pd.isnull(series)]
# Get ranks
if stochastic == True:
# Shuffle by index
series = series.loc[np.random.permutation(series.index)]
# Get rank, ties are determined by their position in the series (hence
# why we randomised the series)
rank = ss.rankdata(series, method="ordinal")
else:
# Get rank, ties are averaged
rank = ss.rankdata(series, method="average")
transformed = rank_to_normal(rank, c, len(rank))
# Convert numpy array back to series
# rank = pd.Series(rank, index=series.index)
# # Convert rank to normal distribution
# transformed = rank_to_normal(rank, c, len(rank)) #rank.apply(rank_to_normal, c=c, n=len(rank))
return pd.Series(transformed, index=series.index) #[orig_idx] #.values
def rank_to_normal(rank, c, n):
# Standard quantile function
x = (rank - c) / (n - 2*c + 1)
return ss.norm.ppf(x)
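# Worked example (illustrative): with Blom's constant c = 3/8 and n = 5
# observations, the middle rank maps to the centre of the standard normal:
# x = (3 - 0.375) / (5 - 0.75 + 1) = 2.625 / 5.25 = 0.5 and norm.ppf(0.5) = 0,
# so rank_to_normal(3, 3.0/8, 5) == 0.0, with ranks 1..5 mapping symmetrically.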
def rank_INT_DF(df):
# ddata = dd.from_pandas(df.T, npartitions=8, sort = False)
# return ddata.map_partitions(lambda df: df.apply(rank_INT, axis = 1)).compute().T
MAX_WORKERS = 16
cols = df.columns.values
print(cols)
col_ind_begin = 0
col_len = cols.shape[0]
while col_ind_begin < col_len:
col_ind_end = min(col_ind_begin + MAX_WORKERS, col_len)
with concurrent.futures.ThreadPoolExecutor(max_workers = MAX_WORKERS) as executor:
future_predict = {executor.submit(rank_INT, df[cols[ind]]): ind for ind in range(col_ind_begin, col_ind_end)}
for future in concurrent.futures.as_completed(future_predict):
ind = future_predict[future]
try:
df[cols[ind]] = future.result()
except Exception as exc:
print('%dth feature normalize generate an exception: %s' % (ind, exc))
col_ind_begin = col_ind_end
if col_ind_begin % 100 == 0:
print('Gen %d normalized features' % col_ind_begin)
return df
def test():
# Test
s = pd.Series(np.random.randint(1, 10, 6), index=["a", "b", "c", "d", "e", "f"])
print(s)
    res = rank_INT(s)
print(res)
return 0
def test_df():
# Test
s = pd.DataFrame({'c0': np.random.randint(1, 10, 6), 'c1': np.random.randint(1, 10, 6)}, index=["a", "b", "c", "d", "e", "f"])
print(s)
res = rank_INT_DF(s)
print(res)
return 0
if __name__ == '__main__':
test_df() | apache-2.0 |
sgraham/nope | chrome/test/data/nacl/gdb_rsp.py | 99 | 2431 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is based on gdb_rsp.py file from NaCl repository.
import re
import socket
import time
def RspChecksum(data):
checksum = 0
for char in data:
checksum = (checksum + ord(char)) % 0x100
return checksum
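# Worked example (illustrative): an RSP packet is framed as '$<data>#<checksum>'
# where the checksum is the modulo-256 sum of the payload bytes, in hex.
# For the single-letter 'read general registers' command 'g',
# RspChecksum('g') == ord('g') % 0x100 == 0x67, so the wire format is '$g#67'.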
class GdbRspConnection(object):
def __init__(self, addr):
self._socket = self._Connect(addr)
def _Connect(self, addr):
# We have to poll because we do not know when sel_ldr has
# successfully done bind() on the TCP port. This is inherently
# unreliable.
# TODO(mseaborn): Add a more reliable connection mechanism to
# sel_ldr's debug stub.
timeout_in_seconds = 10
poll_time_in_seconds = 0.1
for i in xrange(int(timeout_in_seconds / poll_time_in_seconds)):
# On Mac OS X, we have to create a new socket FD for each retry.
sock = socket.socket()
try:
sock.connect(addr)
except socket.error:
# Retry after a delay.
time.sleep(poll_time_in_seconds)
else:
return sock
raise Exception('Could not connect to sel_ldr\'s debug stub in %i seconds'
% timeout_in_seconds)
def _GetReply(self):
reply = ''
while True:
data = self._socket.recv(1024)
if len(data) == 0:
raise AssertionError('EOF on socket reached with '
'incomplete reply message: %r' % reply)
reply += data
if '#' in data:
break
match = re.match('\+\$([^#]*)#([0-9a-fA-F]{2})$', reply)
if match is None:
raise AssertionError('Unexpected reply message: %r' % reply)
reply_body = match.group(1)
checksum = match.group(2)
expected_checksum = '%02x' % RspChecksum(reply_body)
if checksum != expected_checksum:
raise AssertionError('Bad RSP checksum: %r != %r' %
(checksum, expected_checksum))
# Send acknowledgement.
self._socket.send('+')
return reply_body
# Send an rsp message, but don't wait for or expect a reply.
def RspSendOnly(self, data):
msg = '$%s#%02x' % (data, RspChecksum(data))
return self._socket.send(msg)
def RspRequest(self, data):
self.RspSendOnly(data)
return self._GetReply()
def RspInterrupt(self):
self._socket.send('\x03')
return self._GetReply()
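# Illustrative usage sketch (not part of the original helper): the host/port
# pair below is hypothetical -- pass whatever address the debug stub reports.
def _example_rsp_session(addr=('localhost', 4014)):
  connection = GdbRspConnection(addr)
  return connection.RspRequest('g')  # ask the stub for the general registers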
| bsd-3-clause |
pllim/ginga | ginga/examples/matplotlib/example3_mpl.py | 3 | 13151 | #! /usr/bin/env python
#
# example3_mpl.py -- Copy attributes from a Ginga Qt widget into a Matplotlib
# figure.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
#
"""
$ ./example3_mpl.py [fits file]
example3 displays a native ginga widget beside a matplotlib figure as two
panes. A fits file can be dropped into the left pane and manipulated using
the standard Ginga interactive controls
see (http://ginga.readthedocs.io/en/latest/quickref.html).
Drop down boxes allow the color map to be changed.
The right pane has two buttons under it: pressing each button sets up a
different kind of plot in the mpl pane based on the current state of the
ginga pane.
You need Qt4 with python bindings (or pyside) installed to run this example.
"""
import sys
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from ginga.qtw.ImageViewCanvasQt import ImageViewCanvas
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga import cmap, imap
from ginga.misc import log
from ginga.util.loader import load_data
STD_FORMAT = '%(asctime)s | %(levelname)1.1s | %(filename)s:%(lineno)d (%(funcName)s) | %(message)s'
class FitsViewer(QtGui.QMainWindow):
def __init__(self, logger):
super(FitsViewer, self).__init__()
self.logger = logger
menubar = self.menuBar()
# create a File pulldown menu, and add it to the menu bar
filemenu = menubar.addMenu("File")
item = QtGui.QAction("Open File", menubar)
item.triggered.connect(self.open_file)
filemenu.addAction(item)
sep = QtGui.QAction(menubar)
sep.setSeparator(True)
filemenu.addAction(sep)
item = QtGui.QAction("Quit", menubar)
item.triggered.connect(self.close)
filemenu.addAction(item)
# Add matplotlib color maps to our built in ones
cmap.add_matplotlib_cmaps()
self.cmaps = cmap.get_names()
self.imaps = imap.get_names()
wd, ht = 500, 500
# Create a Ginga widget
fi = ImageViewCanvas(logger, render='widget')
fi.enable_autocuts('on')
fi.set_autocut_params('zscale')
fi.enable_autozoom('on')
fi.enable_draw(False)
fi.set_callback('drag-drop', self.drop_file_cb)
fi.set_callback('cursor-changed', self.cursor_cb)
fi.set_bg(0.2, 0.2, 0.2)
fi.ui_set_active(True)
self.fitsimage = fi
fi.show_color_bar(True)
# enable various key and mouse controlled actions
bd = fi.get_bindings()
bd.enable_all(True)
self.cp_tag = 'compass'
# pack widget into layout
gingaw = fi.get_widget()
gingaw.resize(wd, ht)
vbox1 = QtGui.QWidget()
layout = QtGui.QVBoxLayout()
layout.addWidget(gingaw, stretch=1)
self.cm = cmap.get_cmap('gray')
self.im = imap.get_imap('ramp')
# color map selection widget
wcmap = QtGui.QComboBox()
for name in self.cmaps:
wcmap.addItem(name)
index = self.cmaps.index('gray')
wcmap.setCurrentIndex(index)
wcmap.activated.connect(self.set_cmap_cb)
self.wcmap = wcmap
# intensity map selection widget
wimap = QtGui.QComboBox()
for name in self.imaps:
wimap.addItem(name)
index = self.imaps.index('ramp')
wimap.setCurrentIndex(index)
wimap.activated.connect(self.set_cmap_cb)
self.wimap = wimap
#wopen = QtGui.QPushButton("Open File")
#wopen.clicked.connect(self.open_file)
# add buttons to layout
hbox = QtGui.QHBoxLayout()
hbox.setContentsMargins(QtCore.QMargins(4, 2, 4, 2))
hbox.addStretch(1)
for w in (wcmap, wimap):
hbox.addWidget(w, stretch=0)
hw = QtGui.QWidget()
hw.setLayout(hbox)
layout.addWidget(hw, stretch=0)
vbox1.setLayout(layout)
# Create a matplotlib Figure
#self.fig = matplotlib.figure.Figure(figsize=(wd, ht))
self.fig = matplotlib.figure.Figure()
self.canvas = FigureCanvas(self.fig)
vbox2 = QtGui.QWidget()
layout = QtGui.QVBoxLayout()
layout.addWidget(self.canvas, stretch=1)
# Add matplotlib buttons
hbox = QtGui.QHBoxLayout()
hbox.setContentsMargins(QtCore.QMargins(4, 2, 4, 2))
wgetimg = QtGui.QPushButton("Get Data")
wgetimg.clicked.connect(self.get_image)
wgetrgb = QtGui.QPushButton("Get RGB")
wgetrgb.clicked.connect(self.get_rgb_image)
#wquit = QtGui.QPushButton("Quit")
#wquit.clicked.connect(self.close)
hbox.addStretch(1)
for w in (wgetimg, wgetrgb):
hbox.addWidget(w, stretch=0)
hw = QtGui.QWidget()
hw.setLayout(hbox)
layout.addWidget(hw, stretch=0)
vbox2.setLayout(layout)
vbox = QtGui.QVBoxLayout()
vbox.setContentsMargins(QtCore.QMargins(2, 2, 2, 2))
vbox.setSpacing(1)
w = QtGui.QWidget()
layout = QtGui.QHBoxLayout()
layout.addWidget(vbox1, stretch=1.0)
layout.addWidget(vbox2, stretch=1.0)
w.setLayout(layout)
vbox.addWidget(w, stretch=1)
self.readout = QtGui.QLabel("")
vbox.addWidget(self.readout, stretch=0,
alignment=QtCore.Qt.AlignCenter)
vw = QtGui.QWidget()
vw.setLayout(vbox)
self.setCentralWidget(vw)
def set_cmap_cb(self, kind):
index = self.wcmap.currentIndex()
cmap_name = self.cmaps[index]
self.cm = cmap.get_cmap(cmap_name)
index = self.wimap.currentIndex()
imap_name = self.imaps[index]
self.im = imap.get_imap(imap_name)
self.fitsimage.set_cmap(self.cm)
self.fitsimage.set_imap(self.im)
def clear_canvas(self):
self.fitsimage.delete_all_objects()
def load_file(self, filepath):
image = load_data(filepath, logger=self.logger)
self.fitsimage.set_image(image)
self.setWindowTitle(filepath)
# create compass
try:
try:
self.fitsimage.delete_object_by_tag(self.cp_tag)
except KeyError:
pass
width, height = image.get_size()
x, y = width / 2.0, height / 2.0
# radius we want the arms to be (approx 1/4 the largest dimension)
radius = float(max(width, height)) / 4.0
Compass = self.fitsimage.get_draw_class('compass')
self.fitsimage.add(Compass(x, y, radius, color='skyblue',
fontsize=14), tag=self.cp_tag)
except Exception as e:
self.logger.warning("Can't calculate compass: %s" % (
str(e)))
def open_file(self):
res = QtGui.QFileDialog.getOpenFileName(self, "Open FITS file",
".", "FITS files (*.fits)")
if isinstance(res, tuple):
fileName = res[0]
else:
fileName = str(res)
if len(fileName) != 0:
self.load_file(fileName)
def drop_file_cb(self, viewer, paths):
filename = paths[0]
self.load_file(filename)
def closeEvent(self, ce):
self.close()
def cursor_cb(self, viewer, button, data_x, data_y):
"""This gets called when the data position relative to the cursor
changes.
"""
# Get the value under the data coordinates
try:
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = viewer.get_data(int(data_x + viewer.data_off),
int(data_y + viewer.data_off))
except Exception:
value = None
fits_x, fits_y = data_x + 1, data_y + 1
# Calculate WCS RA
try:
# NOTE: image function operates on DATA space coords
image = viewer.get_image()
if image is None:
# No image loaded
return
ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
format='str', coords='fits')
except Exception as e:
self.logger.warning("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = 'BAD WCS'
dec_txt = 'BAD WCS'
text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
ra_txt, dec_txt, fits_x, fits_y, value)
self.readout.setText(text)
def calculate_aspect(self, shape, extent):
dx = abs(extent[1] - extent[0]) / float(shape[1])
dy = abs(extent[3] - extent[2]) / float(shape[0])
return dx / dy
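    # Worked example (illustrative numbers): for an array of shape
    # (400 rows, 800 columns) displayed over extent (0, 20, 0, 10),
    # dx = 20 / 800 = 0.025 and dy = 10 / 400 = 0.025, so the aspect
    # passed on to imshow is dx / dy = 1.0 (square data pixels).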
def make_mpl_colormap(self, fitsimage):
# make the equivalent color map for matplotlib
rgbmap = fitsimage.get_rgbmap()
cm = rgbmap.get_cmap()
mpl_cm = cmap.ginga_to_matplotlib_cmap(cm)
return mpl_cm
def get_wcs_extent(self, image, x0, y0, x1, y1):
# WCS of the area
ra0, dec0 = image.pixtoradec(x0, y0, format='deg', coords='data')
ra1, dec1 = image.pixtoradec(x1, y1, format='deg', coords='data')
extent = (ra0, ra1, dec0, dec1)
return extent
def get_rgb_image(self):
fi = self.fitsimage
# clear previous image
self.fig.clf()
# Grab the RGB array for the current image and place it in the
# matplotlib figure axis
arr = fi.getwin_array(order='RGB')
# force aspect ratio of figure to match
wd, ht = fi.get_window_size()
# Get the data extents
x0, y0 = fi.get_data_xy(0, 0)
x1, y1 = fi.get_data_xy(wd - 1, ht - 1)
flipx, flipy, swapxy = fi.get_transforms()
if swapxy:
x0, x1, y0, y1 = y0, y1, x0, x1
xlabel = 'dec'
ylabel = 'ra'
else:
xlabel = 'ra'
ylabel = 'dec'
#extent = (x0, x1, y1, y0)
image = fi.get_image()
extent = self.get_wcs_extent(image, x0, x1, y1, y0)
#print "extent=%s" % (str(extent))
# Calculate aspect ratio
aspect = self.calculate_aspect(arr.shape, extent)
#ax = self.fig.add_subplot(111, adjustable='box', aspect=aspect)
ax = self.fig.add_subplot(111)
ax.autoscale(True, tight=True)
ax.set_anchor('C')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# make the equivalent color map for matplotlib
self.make_mpl_colormap(fi)
ax.imshow(arr, interpolation="nearest", origin="upper",
vmin=0, vmax=255,
extent=extent,
aspect=aspect)
# force an update of the figure
self.fig.canvas.draw()
def get_image(self):
fi = self.fitsimage
# clear previous image
self.fig.clf()
ax = self.fig.add_subplot(111)
ax.autoscale(True, tight=True)
x0, y0, x1, y1 = tuple(map(int, fi.get_datarect()))
#extent = (x0, x1, y0, y1)
image = fi.get_image()
arr = image.cutout_data(x0, y0, x1, y1)
extent = self.get_wcs_extent(image, x0, y0, x1, y1)
# get cut levels
loval, hival = fi.get_cut_levels()
# make the equivalent color map for matplotlib
cm = self.make_mpl_colormap(fi)
# add the image to the figure
interp = 'nearest'
img = ax.imshow(arr, interpolation=interp, origin="lower",
vmin=loval, vmax=hival, cmap=cm,
aspect="equal", extent=extent)
# add a colorbar
self.fig.colorbar(img, orientation='vertical')
# force an update of the figure
self.fig.canvas.draw()
def main(options, args):
app = QtGui.QApplication(args)
logger = log.get_logger(name="example3", options=options)
w = FitsViewer(logger)
w.resize(1024, 540)
w.show()
app.setActiveWindow(w)
w.raise_()
w.activateWindow()
if len(args) > 0:
w.load_file(args[0])
app.exec_()
if __name__ == "__main__":
# Parse command line options
from argparse import ArgumentParser
argprs = ArgumentParser()
argprs.add_argument("--debug", dest="debug", default=False,
action="store_true",
help="Enter the pdb debugger on main()")
argprs.add_argument("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
log.addlogopts(argprs)
(options, args) = argprs.parse_known_args(sys.argv[1:])
# Are we debugging this?
if options.debug:
import pdb
pdb.run('main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys.argv[0]))
profile.run('main(options, args)')
else:
main(options, args)
# END
| bsd-3-clause |
michrawson/nyu_ml_lectures | notebooks/figures/plot_interactive_tree.py | 20 | 2317 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals.six import StringIO # doctest: +SKIP
from sklearn.tree import export_graphviz
from scipy.misc import imread
from scipy import ndimage
import re
X, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=50)
def tree_image(tree, fout=None):
try:
import pydot
except ImportError:
# make a hacky white plot
x = np.ones((10, 10))
x[0, 0] = 0
return x
dot_data = StringIO()
export_graphviz(tree, out_file=dot_data)
data = re.sub(r"gini = 0\.[0-9]+\\n", "", dot_data.getvalue())
data = re.sub(r"samples = [0-9]+\\n", "", data)
data = re.sub(r"\\nsamples = [0-9]+", "", data)
graph = pydot.graph_from_dot_data(data)
if fout is None:
fout = "tmp.png"
graph.write_png(fout)
return imread(fout)
def plot_tree(max_depth=1):
fig, ax = plt.subplots(1, 2, figsize=(15, 7))
h = 0.02
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
if max_depth != 0:
tree = DecisionTreeClassifier(max_depth=max_depth, random_state=1).fit(X, y)
Z = tree.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)
faces = tree.tree_.apply(np.c_[xx.ravel(), yy.ravel()].astype(np.float32))
faces = faces.reshape(xx.shape)
border = ndimage.laplace(faces) != 0
ax[0].contourf(xx, yy, Z, alpha=.4)
ax[0].scatter(xx[border], yy[border], marker='.', s=1)
ax[0].set_title("max_depth = %d" % max_depth)
ax[1].imshow(tree_image(tree))
ax[1].axis("off")
else:
ax[0].set_title("data set")
ax[1].set_visible(False)
ax[0].scatter(X[:, 0], X[:, 1], c=np.array(['b', 'r'])[y], s=60)
ax[0].set_xlim(x_min, x_max)
ax[0].set_ylim(y_min, y_max)
ax[0].set_xticks(())
ax[0].set_yticks(())
def plot_tree_interactive():
from IPython.html.widgets import interactive, IntSlider
slider = IntSlider(min=0, max=8, step=1, value=0)
return interactive(plot_tree, max_depth=slider)
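# Usage sketch (illustrative, not part of the original script): inside a
# Jupyter/IPython notebook cell, the widget returned above renders a depth
# slider next to the decision boundary and the exported tree image, e.g.
#   from plot_interactive_tree import plot_tree_interactive
#   plot_tree_interactive()
# The module name used in this import is an assumption based on the file path.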
| cc0-1.0 |
masfaraud/volmdlr | volmdlr/edges.py | 1 | 132707 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
from packaging import version
import math
import numpy as npy
import scipy as scp
from geomdl import utilities
from geomdl import BSpline
from geomdl.operations import length_curve, split_curve
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import __version__ as _mpl_version
import matplotlib.pyplot as plt
import matplotlib.patches
from typing import List
import dessia_common as dc
import volmdlr.core
import volmdlr.geometry
import plot_data.core as plot_data
def standardize_knot_vector(knot_vector):
u0 = knot_vector[0]
u1 = knot_vector[-1]
standard_u_knots = []
if u0 != 0 or u1 != 1:
x = 1 / (u1 - u0)
y = u0 / (u0 - u1)
for u in knot_vector:
standard_u_knots.append(u * x + y)
return standard_u_knots
else:
return knot_vector
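# Worked example (illustrative): a knot vector that does not span [0, 1] is
# rescaled affinely, e.g. standardize_knot_vector([2., 3., 4.]) returns
# [0.0, 0.5, 1.0], while an already-standard vector such as [0., 0.5, 1.]
# is returned unchanged.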
class Edge(dc.DessiaObject):
def __init__(self, start, end, name=''):
self.start = start
self.end = end
dc.DessiaObject.__init__(self, name=name)
def __getitem__(self, key):
if key == 0:
return self.start
elif key == 1:
return self.end
else:
raise IndexError
def polygon_points(self, min_x_density=None, min_y_density=None):
n = 0 # Number of points to insert between start and end
if min_x_density:
dx = abs(self.start[0] - self.end[0])
n = max(n, math.floor(dx * min_x_density))
if min_y_density:
dy = abs(self.start[1] - self.end[1])
n = max(n, math.floor(dy * min_y_density))
if n:
l = self.length()
return [self.point_at_abscissa(i * l / (n + 1)) for i in
range(n + 2)]
else:
return [self.start, self.end]
@classmethod
def from_step(cls, arguments, object_dict):
if object_dict[arguments[3]].__class__.__name__ == 'Line3D':
return LineSegment3D(object_dict[arguments[1]],
object_dict[arguments[2]], arguments[0][1:-1])
elif object_dict[arguments[3]].__class__.__name__ == 'Circle3D':
            # We assume the STEP arc is read in the trigonometric (counterclockwise) direction
circle = object_dict[arguments[3]]
p1 = object_dict[arguments[1]]
p2 = object_dict[arguments[2]]
if p1 == p2:
return FullArc3D(circle.frame.origin, p1, circle.frame.w)
else:
p1, p2 = p2, p1
circle.frame.normalize()
interior3d = volmdlr.core.clockwise_interior_from_circle3d(
p1, p2, circle)
return volmdlr.edges.Arc3D(p1, interior3d, p2,
name=arguments[0][1:-1])
elif object_dict[arguments[3]].__class__ is volmdlr.wires.Ellipse3D:
majorax = object_dict[arguments[3]].major_axis
minorax = object_dict[arguments[3]].minor_axis
center = object_dict[arguments[3]].center
normal = object_dict[arguments[3]].normal
normal.normalize()
majordir = object_dict[arguments[3]].major_dir
majordir.normalize()
minordir = normal.cross(majordir)
minordir.normalize()
frame = volmdlr.Frame3D(center, majordir, minordir, normal)
            p1 = object_dict[
                arguments[1]]  # we assume p1 lies along majordir
p2 = object_dict[arguments[2]]
if p1 == p2:
angle = 5 * math.pi / 4
xtra = volmdlr.Point3D((majorax * math.cos(math.pi / 2),
minorax * math.sin(math.pi / 2), 0))
extra = frame.old_coordinates(xtra)
else:
extra = None
            # Position the points in their frame
p1_new, p2_new = frame.new_coordinates(
p1), frame.new_coordinates(p2)
            # Angle for p1
u1, u2 = p1_new.vector[0] / majorax, p1_new.vector[1] / minorax
theta1 = volmdlr.sin_cos_angle(u1, u2)
            # Angle for p2
u3, u4 = p2_new.vector[0] / majorax, p2_new.vector[1] / minorax
theta2 = volmdlr.sin_cos_angle(u3, u4)
            if theta1 > theta2:  # counterclockwise (trigonometric direction)
angle = math.pi + (theta1 + theta2) / 2
else:
angle = (theta1 + theta2) / 2
p_3 = volmdlr.Point3D(
(majorax * math.cos(angle), minorax * math.sin(angle), 0))
p3 = frame.old_coordinates(p_3)
arcellipse = ArcEllipse3D(p1, p3, p2, center, majordir, normal,
arguments[0][1:-1], extra)
return arcellipse
elif object_dict[arguments[3]].__class__.__name__ == 'BSplineCurve3D':
            # BSplineCurve3D to be split on the left and right using the points above?
return object_dict[arguments[3]]
else:
raise NotImplementedError(
'Unsupported: {}'.format(object_dict[arguments[3]]))
class Line(dc.DessiaObject):
"""
Abstract class
"""
def __init__(self, point1, point2, name=''):
self.point1 = point1
self.point2 = point2
dc.DessiaObject.__init__(self, name=name)
def __getitem__(self, key):
if key == 0:
return self.point1
elif key == 1:
return self.point2
else:
raise IndexError
def unit_direction_vector(self, abscissa=0.):
u = self.direction_vector()
u.normalize()
return u
def direction_vector(self, abscissa=0.):
return self.point2 - self.point1
def normal_vector(self, abscissa=0.):
return self.direction_vector().normal_vector()
def unit_normal_vector(self, abscissa=0.):
return self.unit_direction_vector().normal_vector()
def point_projection(self, point):
u = self.point2 - self.point1
norm_u = u.norm()
t = (point - self.point1).dot(u) / norm_u ** 2
projection = self.point1 + t * u
return projection, t * norm_u
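    # Worked example (illustrative): for a line through (0, 0) and (2, 0) and
    # the point (1, 1), u = (2, 0) and t = ((1, 1) - (0, 0)).u / |u|^2 = 0.5,
    # so point_projection returns the projection (1, 0) together with the
    # curvilinear abscissa t * |u| = 1.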
def abscissa(self, point):
u = self.point2 - self.point1
norm_u = u.norm()
t = (point - self.point1).dot(u) / norm_u
return t
def split(self, split_point):
return [self.__class__(self.point1, split_point),
self.__class__(split_point, self.point2)]
class LineSegment(Edge):
"""
Abstract class
"""
def abscissa(self, point):
u = self.end - self.start
length = u.norm()
t = (point - self.start).dot(u) / length
if t < -1e-9 or t > length + 1e-9:
raise ValueError(
'Point is not on linesegment: abscissa={}'.format(t))
return t
def unit_direction_vector(self, abscissa=0.):
u = self.end - self.start
u.normalize()
return u
    def direction_vector(self, s=0):
        """
        Returns the unit direction vector of the line segment.
        """
        return self.unit_direction_vector()
        # return self.end - self.start
def normal_vector(self, abscissa=0.):
return self.unit_direction_vector().normal_vector()
def point_projection(self, point):
p1, p2 = self.points
u = p2 - p1
norm_u = u.norm()
t = (point - p1).dot(u) / norm_u ** 2
projection = p1 + t * u
return projection, t * norm_u
def split(self, split_point):
return [self.__class__(self.start, split_point),
self.__class__(split_point, self.end)]
class Line2D(Line):
"""
Define an infinite line given by two points.
"""
def __init__(self, point1, point2, *, name=''):
self.points = [point1, point2]
Line.__init__(self, point1, point2, name=name)
def to_3d(self, plane_origin, x1, x2):
p3D = [p.to_3d(plane_origin, x1, x2) for p in self.points]
        return Line3D(*p3D, self.name)
def rotation(self, center, angle, copy=True):
if copy:
return Line2D(
*[p.rotation(center, angle, copy=True) for p in self.points])
else:
for p in self.points:
p.rotation(center, angle, copy=False)
def translation(self, offset, copy=True):
if copy:
return Line2D(
*[p.translation(offset, copy=True) for p in self.points])
else:
for p in self.points:
p.translation(offset, copy=False)
def plot(self, ax=None, color='k', dashed=True):
if ax is None:
fig, ax = plt.subplots()
if version.parse(_mpl_version) >= version.parse('3.3.2'):
if dashed:
ax.axline((self.point1.x, self.point1.y),
(self.point2.x, self.point2.y),
dashes=[30, 5, 10, 5],
color=color)
else:
ax.axline((self.point1.x, self.point1.y),
(self.point2.x, self.point2.y),
color=color)
else:
u = self.direction_vector()
p3 = self.point1 - 3 * u
p4 = self.point2 + 4 * u
if dashed:
ax.plot([p3[0], p4[0]], [p3[1], p4[1]], color=color,
dashes=[30, 5, 10, 5])
else:
ax.plot([p3[0], p4[0]], [p3[1], p4[1]], color=color)
return ax
def plot_data(self, edge_style=None):
return plot_data.Line2D([self.point1.x, self.point1.y,
self.point2.x, self.point2.y],
edge_style=edge_style)
def line_intersections(self, line):
point = volmdlr.Point2D.line_intersection(self, line)
if point is not None:
point_projection1, _ = self.point_projection(point)
if point_projection1 is None:
return []
if line.__class__.__name__ == 'Line2D':
point_projection2, _ = line.point_projection(point)
if point_projection2 is None:
return []
return [point_projection1]
else:
return []
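    # Usage sketch (illustrative, assuming volmdlr.Point2D(x, y)):
    #   l1 = Line2D(volmdlr.Point2D(0, 0), volmdlr.Point2D(1, 1))
    #   l2 = Line2D(volmdlr.Point2D(0, 1), volmdlr.Point2D(1, 0))
    #   l1.line_intersections(l2)  # expected: [Point2D(0.5, 0.5)]
    # The result relies on volmdlr.Point2D.line_intersection, which is used
    # throughout this module.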
def create_tangent_circle(self, point, other_line):
"""
        Computes the two circles that are tangent to both lines and pass
        through a point located on one of the two lines.
"""
# point will be called I(x_I, y_I)
# self will be (AB)
# line will be (CD)
if math.isclose(self.point_distance(point), 0, abs_tol=1e-10):
I = volmdlr.Vector2D(point[0], point[1])
A = volmdlr.Vector2D(self.points[0][0], self.points[0][1])
B = volmdlr.Vector2D(self.points[1][0], self.points[1][1])
C = volmdlr.Vector2D(other_line.points[0][0],
other_line.points[0][1])
D = volmdlr.Vector2D(other_line.points[1][0],
other_line.points[1][1])
elif math.isclose(other_line.point_distance(point), 0, abs_tol=1e-10):
I = volmdlr.Vector2D(point[0], point[1])
C = volmdlr.Vector2D(self.points[0][0], self.points[0][1])
D = volmdlr.Vector2D(self.points[1][0], self.points[1][1])
A = volmdlr.Vector2D(other_line.points[0][0],
other_line.points[0][1])
B = volmdlr.Vector2D(other_line.points[1][0],
other_line.points[1][1])
else:
raise AttributeError("The point isn't on any of the two lines")
        # Change of frame
new_u = volmdlr.Vector2D((B - A))
new_u.normalize()
new_v = new_u.unit_normal_vector()
new_basis = volmdlr.Frame2D(I, new_u, new_v)
new_A = new_basis.new_coordinates(A)
new_B = new_basis.new_coordinates(B)
new_C = new_basis.new_coordinates(C)
new_D = new_basis.new_coordinates(D)
if new_C[1] == 0 and new_D[1] == 0:
# Segments are on the same line: no solution
return None, None
elif math.isclose(self.unit_direction_vector().dot(
other_line.unit_normal_vector()), 0, abs_tol=1e-06):
# Parallel segments: one solution
segments_distance = abs(new_C[1] - new_A[1])
r = segments_distance / 2
new_circle_center = volmdlr.Point2D(
(0, npy.sign(new_C[1] - new_A[1]) * r))
circle_center = new_basis.old_coordinates(new_circle_center)
circle = volmdlr.wires.Circle2D(circle_center, r)
return circle, None
elif math.isclose(self.unit_direction_vector().dot(
other_line.unit_direction_vector()), 0, abs_tol=1e-06):
# Perpendicular segments: 2 solution
line_AB = Line2D(volmdlr.Point2D(new_A), volmdlr.Point2D(new_B))
line_CD = Line2D(volmdlr.Point2D(new_C), volmdlr.Point2D(new_D))
new_pt_K = volmdlr.Point2D.line_intersection(line_AB, line_CD)
r = abs(new_pt_K[0])
new_circle_center1 = volmdlr.Point2D((0, r))
new_circle_center2 = volmdlr.Point2D((0, -r))
circle_center1 = new_basis.old_coordinates(new_circle_center1)
circle_center2 = new_basis.old_coordinates(new_circle_center2)
circle1 = volmdlr.wires.Circle2D(circle_center1, r)
circle2 = volmdlr.wires.Circle2D(circle_center2, r)
return circle1, circle2
        # =============================================================================
        # GENERAL CASE: THE SEGMENTS ARE IN ARBITRARY POSITION
        # => 2 SOLUTIONS
        # =============================================================================
else:
line_AB = Line2D(volmdlr.Point2D(new_A), volmdlr.Point2D(new_B))
line_CD = Line2D(volmdlr.Point2D(new_C), volmdlr.Point2D(new_D))
new_pt_K = volmdlr.Point2D.line_intersection(line_AB, line_CD)
pt_K = volmdlr.Point2D(new_basis.old_coordinates(new_pt_K))
if pt_K == I:
return None, None
            # Change of frame:
new_u2 = volmdlr.Vector2D(pt_K - I)
new_u2.normalize()
            new_v2 = new_u2.unit_normal_vector()
new_basis2 = volmdlr.Frame2D(I, new_u2, new_v2)
new_A = new_basis2.new_coordinates(A)
new_B = new_basis2.new_coordinates(B)
new_C = new_basis2.new_coordinates(C)
new_D = new_basis2.new_coordinates(D)
new_pt_K = new_basis2.new_coordinates(pt_K)
teta1 = math.atan2(new_C[1], new_C[0] - new_pt_K[0])
teta2 = math.atan2(new_D[1], new_D[0] - new_pt_K[0])
if teta1 < 0:
teta1 += math.pi
if teta2 < 0:
teta2 += math.pi
if not math.isclose(teta1, teta2, abs_tol=1e-08):
if math.isclose(teta1, math.pi, abs_tol=1e-08) or math.isclose(
teta1, 0., abs_tol=1e-08):
teta = teta2
elif math.isclose(teta2, math.pi,
abs_tol=1e-08) or math.isclose(teta2, 0.,
abs_tol=1e-08):
teta = teta1
else:
teta = teta1
r1 = new_pt_K[0] * math.sin(teta) / (1 + math.cos(teta))
r2 = new_pt_K[0] * math.sin(teta) / (1 - math.cos(teta))
new_circle_center1 = volmdlr.Point2D(0, -r1)
new_circle_center2 = volmdlr.Point2D(0, r2)
circle_center1 = new_basis2.old_coordinates(new_circle_center1)
circle_center2 = new_basis2.old_coordinates(new_circle_center2)
if new_basis.new_coordinates(circle_center1)[1] > 0:
circle1 = volmdlr.wires.Circle2D(circle_center1, r1)
circle2 = volmdlr.wires.Circle2D(circle_center2, r2)
else:
circle1 = volmdlr.wires.Circle2D(circle_center2, r2)
circle2 = volmdlr.wires.Circle2D(circle_center1, r1)
return circle1, circle2
    def cut_between_two_points(self, point1, point2):
return LineSegment2D(point1, point2)
class BSplineCurve2D(Edge):
_non_serializable_attributes = ['curve']
def __init__(self,
degree: int,
control_points: List[volmdlr.Point2D],
knot_multiplicities: List[int],
knots: List[float],
weights=None, periodic=False, name=''):
self.control_points = control_points
self.degree = degree
knots = standardize_knot_vector(knots)
self.knots = knots
self.knot_multiplicities = knot_multiplicities
self.weights = weights
self.periodic = periodic
curve = BSpline.Curve()
curve.degree = degree
if weights is None:
P = [(control_points[i][0], control_points[i][1]) for i in
range(len(control_points))]
curve.ctrlpts = P
else:
Pw = [(control_points[i][0] * weights[i],
control_points[i][1] * weights[i], weights[i]) for i in
range(len(control_points))]
curve.ctrlptsw = Pw
knot_vector = []
for i, knot in enumerate(knots):
knot_vector.extend([knot] * knot_multiplicities[i])
curve.knotvector = knot_vector
self.curve = curve
start = self.point_at_abscissa(0.)
end = self.point_at_abscissa(self.length())
Edge.__init__(self, start, end, name=name)
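    # Construction sketch (illustrative): for a clamped curve of degree 2 with
    # 4 control points, the knot vector must contain
    # len(control_points) + degree + 1 = 7 values, e.g. knots [0., 0.5, 1.]
    # with multiplicities [3, 1, 3]:
    #   curve = BSplineCurve2D(degree=2,
    #                          control_points=[volmdlr.Point2D(0, 0),
    #                                          volmdlr.Point2D(1, 1),
    #                                          volmdlr.Point2D(2, -1),
    #                                          volmdlr.Point2D(3, 0)],
    #                          knot_multiplicities=[3, 1, 3],
    #                          knots=[0., 0.5, 1.])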
@classmethod
def from_geomdl_curve(cls, curve):
knots = list(sorted(set(curve.knotvector)))
knot_multiplicities = [curve.knotvector.count(k) for k in knots]
return BSplineCurve2D(degree=curve.degree,
control_points=curve.ctrlpts,
knots=knots,
knot_multiplicities=knot_multiplicities
)
def bounding_rectangle(self):
points = self.polygon_points()
points_x = [p.x for p in points]
points_y = [p.y for p in points]
return (min(points_x), max(points_x),
min(points_y), max(points_y))
def length(self):
return length_curve(self.curve)
def point_at_abscissa(self, curvilinear_abscissa):
l = self.length()
adim_abs = curvilinear_abscissa/l
if adim_abs > 1:
adim_abs = 1
if adim_abs < 0:
adim_abs = 0.
return volmdlr.Point2D(*self.curve.evaluate_single(adim_abs))
def abscissa(self, point2d):
l = self.length()
res = scp.optimize.minimize_scalar(
# f,
lambda u:(point2d-self.point_at_abscissa(u)).norm(),
method='bounded',
bounds = (0., l)
)
if res.fun > 1e-4:
print(res.fun)
ax = self.plot()
point2d.plot(ax=ax)
best_point = self.point_at_abscissa(res.x)
best_point.plot(ax=ax, color='r')
raise ValueError('abscissa not found')
return res.x
def split(self, point2d):
adim_abscissa = self.abscissa(point2d)/self.length()
curve1, curve2 = split_curve(self.curve, adim_abscissa)
return [BSplineCurve2D.from_geomdl_curve(curve1),
BSplineCurve2D.from_geomdl_curve(curve2)]
def straight_line_area(self):
l = self.length()
points = self.polygon_points()
polygon = volmdlr.wires.ClosedPolygon2D(points)
return polygon.area()
def plot(self, ax=None, color='k', alpha=1, plot_points=False):
if ax is None:
_, ax = plt.subplots()
# self.curve.delta = 0.01
# points = [volmdlr.Point2D(px, py) for (px, py) in self.curve.evalpts]
l = self.length()
points = [self.point_at_abscissa(l*i/50) for i in range(51)]
xp = [p.x for p in points]
yp = [p.y for p in points]
ax.plot(xp, yp, color=color, alpha=alpha)
return ax
def to_3d(self, plane_origin, x1, x2):
control_points3D = [p.to_3d(plane_origin, x1, x2) for p in
self.control_points]
return BSplineCurve3D(self.degree, control_points3D,
self.knot_multiplicities, self.knots,
self.weights, self.periodic)
def polygon_points(self, n=15):
l = self.length()
return [self.point_at_abscissa(i*l/n) for i in range(n+1)]
def rotation(self, center, angle, copy=True):
if copy:
control_points = [p.rotation(center, angle, copy=True) \
for p in self.control_points]
return BSplineCurve2D(self.degree, control_points,
self.knot_multiplicities, self.knots,
self.weights, self.periodic)
else:
for p in self.control_points:
p.rotation(center, angle, copy=False)
def translation(self, offset, copy=True):
if copy:
control_points = [p.translation(offset, copy=True) \
for p in self.control_points]
return BSplineCurve2D(self.degree, control_points,
self.knot_multiplicities, self.knots,
self.weights, self.periodic)
else:
for p in self.control_points:
p.translation(offset, copy=False)
def line_intersections(self, line2d:Line2D):
polygon_points = self.polygon_points()
intersections = []
for p1, p2 in zip(polygon_points[:-1], polygon_points[1:]):
l = LineSegment2D(p1, p2)
intersections.extend(l.line_intersections(line2d))
return intersections
def line_crossings(self, line2d:Line2D):
polygon_points = self.polygon_points()
crossings = []
for p1, p2 in zip(polygon_points[:-1], polygon_points[1:]):
l = LineSegment2D(p1, p2)
crossings.extend(l.line_crossings(line2d))
return crossings
class BezierCurve2D(BSplineCurve2D):
def __init__(self, degree: int, control_points: List[volmdlr.Point2D],
name: str = ''):
knotvector = utilities.generate_knot_vector(degree,
len(control_points))
knot_multiplicity = [1] * len(knotvector)
BSplineCurve2D.__init__(self, degree, control_points,
knot_multiplicity, knotvector,
None, False, name)
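    # Usage sketch (illustrative): a quadratic Bézier segment is the special
    # case degree == len(control_points) - 1, e.g.
    #   bezier = BezierCurve2D(2, [volmdlr.Point2D(0, 0),
    #                              volmdlr.Point2D(1, 2),
    #                              volmdlr.Point2D(2, 0)])
    # which starts at (0, 0), ends at (2, 0) and is pulled towards (1, 2).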
class LineSegment2D(LineSegment):
"""
Define a line segment limited by two points
"""
def __init__(self, start, end, *, name=''):
Edge.__init__(self, start, end, name=name)
def __hash__(self):
return self._data_hash()
def _data_hash(self):
return self.start._data_hash() + self.end._data_hash()
def _data_eq(self, other_object):
if self.__class__.__name__ != other_object.__class__.__name__:
return False
return self.start == other_object.start and self.end == other_object.end
def __eq__(self, other_object):
if self.__class__.__name__ != other_object.__class__.__name__:
return False
return self.start == other_object.start and self.end == other_object.end
def length(self):
return self.end.point_distance(self.start)
def point_at_abscissa(self, curvilinear_abscissa):
return self.start + self.unit_direction_vector() * curvilinear_abscissa
def bounding_rectangle(self):
return (min(self.start.x, self.end.x), max(self.start.x, self.end.x),
min(self.start.y, self.end.y), max(self.start.y, self.end.y))
def straight_line_area(self):
return 0.
def straight_line_second_moment_area(self, point:volmdlr.Point2D):
return 0, 0, 0
def straight_line_center_of_mass(self):
return 0.5*(self.start + self.end)
def point_distance(self, point, return_other_point=False):
"""
Computes the distance of a point to segment of line
"""
if self.start == self.end:
if return_other_point:
return 0, point
return 0
distance, point = volmdlr.core_compiled.LineSegment2DPointDistance(
[(self.start.x, self.start.y), (self.end.x, self.end.y)],
(point.x, point.y))
if return_other_point:
return distance, point
return distance
def point_projection(self, point):
"""
If the projection falls outside the LineSegment2D, returns None.
"""
point, curv_abs = Line2D.point_projection(Line2D(self.start, self.end),
point)
if curv_abs < 0 or curv_abs > self.length():
return None, curv_abs
return point, curv_abs
def line_intersections(self, line: Line2D):
point = volmdlr.Point2D.line_intersection(self, line)
if point is not None:
point_projection1, _ = self.point_projection(point)
if point_projection1 is None:
return []
if line.__class__.__name__ == 'LineSegment2D':
point_projection2, _ = line.point_projection(point)
if point_projection2 is None:
return []
return [point_projection1]
else:
return []
def linesegment_intersections(self, linesegment: 'LineSegment2D'):
point = volmdlr.Point2D.line_intersection(self, linesegment)
if point is not None:
point_projection1, _ = self.point_projection(point)
if point_projection1 is None:
return []
point_projection2, _ = linesegment.point_projection(point)
if point_projection2 is None:
return []
return [point_projection1]
else:
return []
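    # Usage sketch (illustrative, assuming volmdlr.Point2D(x, y)):
    #   s1 = LineSegment2D(volmdlr.Point2D(0, 0), volmdlr.Point2D(2, 2))
    #   s2 = LineSegment2D(volmdlr.Point2D(0, 2), volmdlr.Point2D(2, 0))
    #   s1.linesegment_intersections(s2)  # expected: [Point2D(1, 1)]
    # Segments that only cross as infinite lines, outside their bounds,
    # return an empty list.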
def line_crossings(self, line: 'Line2D'):
if self.direction_vector().is_colinear_to(line.direction_vector()):
return []
else:
return self.line_intersections(line)
def linesegment_crossings(self, linesegment: 'LineSegment2D'):
if self.direction_vector().is_colinear_to(
linesegment.direction_vector()):
return []
else:
return self.linesegment_intersections(linesegment)
def discretise(self, n: float):
segment_to_nodes = {}
nodes = []
if n * self.length() < 1:
segment_to_nodes[self] = [self.start, self.end]
else:
n0 = int(math.ceil(n * self.length()))
l0 = self.length() / n0
for k in range(n0):
node = self.point_at_abscissa(k * l0)
nodes.append(node)
if self.end not in nodes:
nodes.append(self.end)
if self.start not in nodes:
nodes.insert(0, self.start)
segment_to_nodes[self] = nodes
return segment_to_nodes[self]
def plot(self, ax=None, color='k', alpha=1, arrow=False, width=None,
plot_points=False):
if ax is None:
fig, ax = plt.subplots()
p1, p2 = self.start, self.end
if arrow:
if plot_points:
                ax.plot([p1[0], p2[0]], [p1[1], p2[1]], 'o-', color=color,
                        alpha=alpha)
else:
ax.plot([p1[0], p2[0]], [p1[1], p2[1]], color=color,
alpha=alpha)
length = ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5
if width is None:
width = length / 1000.
head_length = length / 20.
head_width = head_length / 2.
else:
head_width = 2 * width
head_length = head_width
ax.arrow(p1[0], p1[1],
(p2[0] - p1[0]) / length * (length - head_length),
(p2[1] - p1[1]) / length * (length - head_length),
head_width=head_width, fc='b', linewidth=0,
head_length=head_length, width=width, alpha=0.3)
else:
if width is None:
width = 1
if plot_points:
ax.plot([p1[0], p2[0]], [p1[1], p2[1]], color=color,
marker='o', linewidth=width, alpha=alpha)
else:
ax.plot([p1[0], p2[0]], [p1[1], p2[1]], color=color,
linewidth=width, alpha=alpha)
return ax
def to_3d(self, plane_origin, x1, x2):
start = self.start.to_3d(plane_origin, x1, x2)
end = self.end.to_3d(plane_origin, x1, x2)
return LineSegment3D(start, end, name=self.name)
def reverse(self):
return LineSegment2D(self.end.copy(), self.start.copy())
def to_line(self):
return Line2D(self.start, self.end)
def rotation(self, center, angle, copy=True):
if copy:
return LineSegment2D(self.start.rotation(center, angle, copy=True),
self.end.rotation(center, angle, copy=True))
else:
for p in [self.start, self.end]:
p.rotation(center, angle, copy=False)
def translation(self, offset, copy=True):
if copy:
return LineSegment2D(self.start.translation(offset, copy=True),
self.end.translation(offset, copy=True))
else:
for p in [self.start, self.end]:
p.translation(offset, copy=False)
def frame_mapping(self, frame, side, copy=True):
"""
side = 'old' or 'new'
"""
if side == 'old':
if copy:
return LineSegment2D(frame.old_coordinates(self.start),
frame.old_coordinates(self.end))
else:
self.start = frame.old_coordinates(self.start)
self.end = frame.old_coordinates(self.end)
if side == 'new':
if copy:
return LineSegment2D(frame.new_coordinates(self.start),
frame.new_coordinates(self.end))
else:
self.start = frame.new_coordinates(self.start)
self.end = frame.new_coordinates(self.end)
def plot_data(self, edge_style: plot_data.EdgeStyle = None):
return plot_data.LineSegment2D(data=[self.start.x, self.start.y,
self.end.x, self.end.y],
edge_style=edge_style)
def CreateTangentCircle(self, point, other_line):
        circle1, circle2 = Line2D.create_tangent_circle(other_line, point, self)
if circle1 is not None:
point_J1, curv_abs1 = Line2D.point_projection(self, circle1.center)
if curv_abs1 < 0. or curv_abs1 > self.length():
circle1 = None
if circle2 is not None:
point_J2, curv_abs2 = Line2D.point_projection(self, circle2.center)
if curv_abs2 < 0. or curv_abs2 > self.length():
circle2 = None
return circle1, circle2
def polygon_points(self, min_x_density=None, min_y_density=None):
n = 0 # Number of points to insert between start and end
if min_x_density:
dx = abs(self.start[0] - self.end[0])
n = max(n, math.floor(dx * min_x_density))
if min_y_density:
dy = abs(self.start[1] - self.end[1])
n = max(n, math.floor(dy * min_y_density))
if n:
l = self.length()
return [self.point_at_abscissa(i * l / (n + 1)) for i in
range(n + 2)]
else:
return [self.start, self.end]
def infinite_primitive(self, offset):
n = self.normal_vector()
        offset_point_1 = self.start + offset * n
        offset_point_2 = self.end + offset * n
return Line2D(offset_point_1, offset_point_2)
# def border_primitive(self,infinite_primitive:volmdlr.core.Primitive2D,intersection,position):
# if position == 0 :
# return LineSegment2D(infinite_primitive.point1,intersection)
# else :
# return LineSegment2D(intersection,infinite_primitive.point2)
class Arc2D(Edge):
"""
angle: the angle measure always >= 0
"""
def __init__(self,
start: volmdlr.Point2D,
interior: volmdlr.Point2D,
end: volmdlr.Point2D,
name: str = ''):
Edge.__init__(self, start=start, end=end, name=name)
self.interior = interior
xi, yi = interior.x, interior.y
xe, ye = end.x, end.y
xs, ys = start.x, start.y
try:
A = volmdlr.Matrix22(2 * (xs - xi), 2 * (ys - yi),
2 * (xs - xe), 2 * (ys - ye))
b = - volmdlr.Vector2D(xi ** 2 + yi ** 2 - xs ** 2 - ys ** 2,
xe ** 2 + ye ** 2 - xs ** 2 - ys ** 2)
inv_A = A.inverse()
x = inv_A.vector_multiplication(b)
self.center = volmdlr.Point2D(x.x, x.y)
except ValueError:
A = npy.array([[2 * (xs - xi), 2 * (ys - yi)],
[2 * (xs - xe), 2 * (ys - ye)]])
b = - npy.array([xi ** 2 + yi ** 2 - xs ** 2 - ys ** 2,
xe ** 2 + ye ** 2 - xs ** 2 - ys ** 2])
            self.center = volmdlr.Point2D(*npy.linalg.solve(A, b))
r1 = self.start - self.center
r2 = self.end - self.center
ri = self.interior - self.center
self.radius = r1.norm()
angle1 = math.atan2(r1.y, r1.x)
anglei = math.atan2(ri.y, ri.x)
angle2 = math.atan2(r2.y, r2.x)
# Going trigo/clock wise from start to interior
if anglei < angle1:
trigowise_path = (anglei + volmdlr.TWO_PI) - angle1
clockwise_path = angle1 - anglei
else:
trigowise_path = anglei - angle1
clockwise_path = angle1 - anglei + volmdlr.TWO_PI
        # Going trigo/clock wise from interior to end
if angle2 < anglei:
trigowise_path += (angle2 + volmdlr.TWO_PI) - anglei
clockwise_path += anglei - angle2
else:
trigowise_path += angle2 - anglei
clockwise_path += anglei - angle2 + volmdlr.TWO_PI
if clockwise_path > trigowise_path:
self.is_trigo = True
self.angle1 = angle1
self.angle2 = angle2
self.angle = trigowise_path
else:
# Clock wise
self.is_trigo = False
self.angle1 = angle2
self.angle2 = angle1
self.angle = clockwise_path
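    # Construction sketch (illustrative): three points on the unit circle,
    #   arc = Arc2D(volmdlr.Point2D(1, 0),
    #               volmdlr.Point2D(math.sqrt(2) / 2, math.sqrt(2) / 2),
    #               volmdlr.Point2D(0, 1))
    # yield center ~ (0, 0), radius ~ 1, is_trigo True and angle ~ pi / 2,
    # all deduced from the start/interior/end points as above.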
def _get_points(self):
return [self.start, self.interior, self.end]
points = property(_get_points)
def polygon_points(self, angle_resolution:float=10.):
number_points_tesselation = math.ceil(
angle_resolution * abs(self.angle) / 2 / math.pi)
number_points_tesselation = max(number_points_tesselation, 5)
l = self.length()
return [self.point_at_abscissa(
i / (number_points_tesselation - 1) * l) for i in
range(number_points_tesselation)]
def point_belongs(self, point2d:volmdlr.Point2D, tol:float=1e-9)->bool:
"""
        Checks whether the point lies inside the circular sector
        ("pizza slice") delimited by the arc and its center.
"""
radius = self.center.point_distance(point2d)
if radius > self.radius+tol:
return False
theta_tol = tol/radius*self.radius
p = point2d - self.center
u = self.start-self.center
u.normalize()
if self.is_trigo:
v = u.normal_vector()
else:
v = -u.normal_vector()
x, y = p.dot(u), p.dot(v)
theta = math.atan2(y, x)
if theta < -theta_tol or theta > self.angle+theta_tol:
return False
return True
def point_distance(self, point):
vector_start = self.start - self.center
vector_point = point - self.center
vector_end = self.end - self.center
if self.is_trigo:
vector_start, vector_end = vector_end, vector_start
arc_angle = volmdlr.core.clockwise_angle(vector_start, vector_end)
point_angle = volmdlr.core.clockwise_angle(vector_start, vector_point)
if point_angle <= arc_angle:
return abs(
LineSegment2D(point, self.center).length() - self.radius)
else:
return min(LineSegment2D(point, self.start).length(),
LineSegment2D(point, self.end).length())
def to_circle(self):
return volmdlr.wires.Circle2D(self.center, self.radius)
def line_intersections(self, line2d:Line2D):
circle = self.to_circle()
circle_intersection_points = circle.line_intersections(line2d)
# print(circle_intersection_points)
intersection_points = []
for pt in circle_intersection_points:
if self.point_belongs(pt):
intersection_points.append(pt)
return intersection_points
def length(self):
return self.radius * abs(self.angle)
def point_at_abscissa(self, curvilinear_abscissa):
if self.is_trigo:
return self.start.rotation(self.center,
curvilinear_abscissa / self.radius)
# return self.start.rotation(self.center, curvilinear_abscissa*self.angle)
else:
return self.start.rotation(self.center,
-curvilinear_abscissa / self.radius)
# return self.start.rotation(self.center, -curvilinear_abscissa*self.angle)
def abscissa(self, point2d: volmdlr.Point2D, tol=1e-9):
p = point2d - self.center
u = self.start-self.center
u.normalize()
if self.is_trigo:
v = u.normal_vector()
else:
v = -u.normal_vector()
x, y = p.dot(u), p.dot(v)
theta = math.atan2(y, x)
if theta < -tol or theta > self.angle+tol:
            raise ValueError('Point is not on the arc')
if theta < 0:
return 0.
if theta > self.angle:
return self.angle*self.radius
return self.radius * theta
def direction_vector(self, abscissa:float):
return -self.normal_vector(abscissa=abscissa).normal_vector()
def normal_vector(self, abscissa:float):
point = self.point_at_abscissa(abscissa)
if self.is_trigo:
u = self.center - point
else:
u = point - self.center
u.normalize()
return u
def middle_point(self):
l = self.length()
return self.point_at_abscissa(0.5 * l)
def area(self):
return self.radius ** 2 * self.angle / 2
def center_of_mass(self):
# u=self.middle.vector-self.center.vector
u = self.middle_point() - self.center
u.normalize()
# alpha = abs(self.angle)
return self.center + 4 / (3 * self.angle) * self.radius * math.sin(
self.angle * 0.5) * u
def bounding_rectangle(self):
# TODO: Enhance this!!!
return (self.center.x-self.radius, self.center.x+self.radius,
self.center.y-self.radius, self.center.y+self.radius)
def straight_line_area(self):
if self.angle >= math.pi:
angle = volmdlr.TWO_PI - self.angle
area = math.pi*self.radius**2 - 0.5*self.radius**2*(angle-math.sin(angle))
else:
angle = self.angle
area = 0.5 * self.radius ** 2 * (angle - math.sin(angle))
if self.is_trigo:
return area
else:
return -area
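    # Worked check (illustrative): for a half circle of radius 1 drawn
    # counterclockwise (angle == pi), the branch above gives
    # pi * 1**2 - 0.5 * (pi - sin(pi)) = pi / 2, i.e. the area enclosed
    # between the arc and the straight chord joining start and end.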
def straight_line_second_moment_area(self, point:volmdlr.Point2D):
if self.angle2 < self.angle1:
angle2 = self.angle2 + volmdlr.TWO_PI
else:
angle2 = self.angle2
angle1 = self.angle1
# Full arc section
Ix1 = self.radius ** 4 / 8 * (angle2 - angle1 + 0.5 * (
math.sin(2 * angle1) - math.sin(2 * angle2)))
Iy1 = self.radius ** 4 / 8 * (angle2 - angle1 + 0.5 * (
math.sin(2 * angle2) - math.sin(2 * angle1)))
Ixy1 = self.radius ** 4 / 8 * (
math.cos(angle1) ** 2 - math.cos(angle2) ** 2)
# Triangle
xi, yi = (self.start - self.center)
xj, yj = (self.end - self.center)
Ix2 = (yi ** 2 + yi * yj + yj ** 2) * (xi * yj - xj * yi)/12.
Iy2 = (xi ** 2 + xi * xj + xj ** 2) * (xi * yj - xj * yi)/12.
Ixy2 = (xi * yj + 2 * xi * yi + 2 * xj * yj + xj * yi) * (
xi * yj - xj * yi)/24.
if Ix2 < 0.:
Ix2, Iy2, Ixy2 = -Ix2, -Iy2, -Ixy2
if self.angle < math.pi:
if self.is_trigo:
Ix = Ix1 - Ix2
Iy = Iy1 - Iy2
Ixy = Ixy1 - Ixy2
else:
Ix = Ix2 - Ix1
Iy = Iy2 - Iy1
Ixy = Ixy2 - Ixy1
else:
print('Ixy12', Ixy1, Ixy2)
if self.is_trigo:
Ix = Ix1 + Ix2
Iy = Iy1 + Iy2
Ixy = Ixy1 + Ixy2
else:
Ix = -Ix2 - Ix1
Iy = -Iy2 - Iy1
Ixy = -Ixy2 - Ixy1
return volmdlr.geometry.huygens2d(Ix, Iy, Ixy,
self.straight_line_area(), self.center,
point)
def straight_line_center_of_mass(self):
if self.angle == math.pi:
return self.center_of_mass()
u = self.middle_point() - self.center
u.normalize()
if self.angle >= math.pi:
u = -u
bissec = Line2D(self.center, self.center+u)
string = Line2D(self.start, self.end)
p = volmdlr.Point2D.line_intersection(bissec, string)
a = p.point_distance(self.start)
h = p.point_distance(self.center)
triangle_area = h*a
alpha = abs(self.angle)
triangle_cog = self.center + 2/3. * h * u
if self.angle < math.pi:
cog = (self.center_of_mass()*self.area()-triangle_area*triangle_cog)/abs(self.straight_line_area())
else:
cog = (self.center_of_mass()*self.area()+triangle_area*triangle_cog)/abs(self.straight_line_area())
# ax = self.plot()
# bissec.plot(ax=ax, color='grey')
# self.center.plot(ax=ax)
# string.plot(ax=ax, color='grey')
# triangle_cog.plot(ax=ax, color='green')
# self.center_of_mass().plot(ax=ax, color='red')
#
# cog_line = Line2D(volmdlr.O2D, self.center_of_mass()*self.area()-triangle_area*triangle_cog)
# cog_line.plot(ax=ax)
#
# cog.plot(ax=ax, color='b')
# ax.set_aspect('equal')
return cog
def plot(self, ax=None, color='k', alpha=1, plot_points=False):
if ax is None:
fig, ax = plt.subplots()
if plot_points:
for p in [self.center, self.start, self.interior, self.end]:
p.plot(ax=ax, color=color, alpha=alpha)
ax.add_patch(matplotlib.patches.Arc(self.center, 2 * self.radius,
2 * self.radius, angle=0,
theta1=self.angle1 * 0.5 / math.pi * 360,
theta2=self.angle2 * 0.5 / math.pi * 360,
color=color,
alpha=alpha))
return ax
def to_3d(self, plane_origin, x, y):
ps = self.start.to_3d(plane_origin, x, y)
pi = self.interior.to_3d(plane_origin, x, y)
pe = self.end.to_3d(plane_origin, x, y)
return volmdlr.edges.Arc3D(ps, pi, pe, name=self.name)
def rotation(self, center, angle, copy=True):
if copy:
return Arc2D(*[p.rotation(center, angle, copy=True) for p in
[self.start, self.interior, self.end]])
else:
self.__init__(*[p.rotation(center, angle, copy=True) for p in
[self.start, self.interior, self.end]])
def translation(self, offset, copy=True):
if copy:
return Arc2D(*[p.translation(offset, copy=True) for p in
[self.start, self.interior, self.end]])
else:
self.__init__(*[p.translation(offset, copy=True) for p in
[self.start, self.interior, self.end]])
def frame_mapping(self, frame, side, copy=True):
"""
side = 'old' or 'new'
"""
if copy:
return Arc2D(*[p.frame_mapping(frame, side, copy=True) for p in
[self.start, self.interior, self.end]])
else:
self.__init__(*[p.frame_mapping(frame, side, copy=True) for p in
[self.start, self.interior, self.end]])
def second_moment_area(self, point):
"""
Second moment area of part of disk
"""
if self.angle2 < self.angle1:
angle2 = self.angle2 + volmdlr.TWO_PI
else:
angle2 = self.angle2
angle1 = self.angle1
Ix = self.radius ** 4 / 8 * (angle2 - angle1 + 0.5 * (
math.sin(2 * angle1) - math.sin(2 * angle2)))
Iy = self.radius ** 4 / 8 * (angle2 - angle1 + 0.5 * (
math.sin(2 * angle2) - math.sin(2 * angle1)))
Ixy = self.radius ** 4 / 8 * (
math.cos(angle1) ** 2 - math.cos(angle2) ** 2)
Ic = npy.array([[Ix, Ixy], [Ixy, Iy]])
# Must be computed at center, so huygens related to center
return volmdlr.geometry.huygens2d(Ix, Iy, Ixy, self.area(), self.center, point)
def discretise(self, n: float):
arc_to_nodes = {}
nodes = []
if n * self.length() < 1:
arc_to_nodes[self] = [self.start, self.end]
else:
n0 = int(math.ceil(n * self.length()))
l0 = self.length() / n0
for k in range(n0):
node = self.point_at_abscissa(k * l0)
nodes.append(node)
nodes.insert(len(nodes), self.end)
arc_to_nodes[self] = nodes
return arc_to_nodes[self]
def plot_data(self, edge_style: plot_data.EdgeStyle = None,
anticlockwise: bool = None):
list_node = self.polygon_points()
data = []
for nd in list_node:
data.append({'x': nd.x, 'y': nd.y})
return plot_data.Arc2D(cx=self.center.x,
cy=self.center.y,
r=self.radius,
start_angle=self.angle1,
end_angle=self.angle2,
edge_style=edge_style,
data=data,
anticlockwise=anticlockwise,
name=self.name)
def copy(self):
return Arc2D(self.start.copy(),
self.interior.copy(),
self.end.copy())
def split(self, split_point: volmdlr.Point2D):
abscissa = self.abscissa(split_point)
return [Arc2D(self.start,
self.point_at_abscissa(0.5 * abscissa),
split_point),
Arc2D(split_point,
self.point_at_abscissa(1.5 * abscissa),
self.end)
]
def polygon_points(self, angle_resolution=10):
# densities = []
# for d in [min_x_density, min_y_density]:
# if d:
# densities.append(d)
# if densities:
# number_points = max(number_points,
# min(densities) * self.angle * self.radius)
number_points = math.ceil(self.angle * angle_resolution)
l = self.length()
return [self.point_at_abscissa(i * l / number_points) \
for i in range(number_points + 1)]
def infinite_primitive(self, offset):
if not self.is_trigo:
radius = self.radius + offset
else:
radius = self.radius - offset
return FullArc2D(self.center, self.center+radius*volmdlr.Point2D(1, 0.),
is_trigo = self.is_trigo)
def complementary(self):
interior = self.middle_point().rotation(self.center, math.pi)
return Arc2D(self.start, interior, self.end)
class FullArc2D(Edge):
"""
    An edge that starts at start_end and ends at the same point after having
    described a full circle
"""
def __init__(self, center: volmdlr.Point2D, start_end: volmdlr.Point2D,
is_trigo=True,
name: str = ''):
self.center = center
self.radius = center.point_distance(start_end)
self.angle = volmdlr.TWO_PI
self.is_trigo = is_trigo
Edge.__init__(self, start_end, start_end,
name=name) # !!! this is dangerous
def __hash__(self):
return hash(self.radius)
# return hash(self.center) + 5*hash(self.start)
def __eq__(self, other_arc):
if self.__class__.__name__ != other_arc.__class__.__name__:
return False
return (self.center == other_arc.center) \
               and (self.start == other_arc.start)
def bounding_rectangle(self):
xmin = self.center.x - self.radius
xmax = self.center.x + self.radius
ymin = self.center.y - self.radius
ymax = self.center.y + self.radius
return xmin, xmax, ymin, ymax
def area(self):
return math.pi * self.radius ** 2
def straight_line_area(self):
area = self.area()
if self.is_trigo:
return area
else:
return -area
def to_3d(self, plane_origin, x, y):
center = self.center.to_3d(plane_origin, x, y)
start = self.start.to_3d(plane_origin, x, y)
z = x.cross(y)
z.normalize()
return FullArc3D(center, start, z)
def length(self):
return volmdlr.TWO_PI * self.radius
def point_at_abscissa(self, abscissa):
angle = abscissa / self.radius
return self.start.rotation(self.center, angle)
def polygon_points(self, angle_resolution=10):
number_points = math.ceil(self.angle * angle_resolution)
l = self.length()
return [self.point_at_abscissa(i * l / number_points) \
for i in range(number_points + 1)]
def polygonization(self):
# def polygon_points(self, points_per_radian=10):
# densities = []
# for d in [min_x_density, min_y_density]:
# if d:
# densities.append(d)
# if densities:
# number_points = max(number_points,
# min(densities) * self.angle * self.radius)
return volmdlr.wires.ClosedPolygon2D(self.polygon_points())
def plot(self, ax=None, color='k', alpha=1, plot_points=False,
linestyle='-', linewidth=1):
if ax is None:
fig, ax = plt.subplots()
if self.radius > 0:
ax.add_patch(matplotlib.patches.Arc((self.center.x, self.center.y),
2 * self.radius,
2 * self.radius,
angle=0,
theta1=0,
theta2=360,
color=color,
linestyle=linestyle,
linewidth=linewidth))
if plot_points:
ax.plot([self.start.x], [self.start.y], 'o',
color=color, alpha=alpha)
return ax
def cut_between_two_points(self, point1, point2):
x1, y1 = point1 - self.center
x2, y2 = point2 - self.center
angle1 = math.atan2(y1, x1)
angle2 = math.atan2(y2, x2)
if angle2 < angle1:
angle2 += volmdlr.TWO_PI
angle_i = 0.5*(angle1+angle2)
interior = point1.rotation(self.center, angle_i)
arc = Arc2D(point1, interior, point2)
if self.is_trigo != arc.is_trigo:
arc = arc.complementary()
return arc
def line_intersections(self, line2d:Line2D, tol=1e-9):
# Duplicate from circle
Q = self.center
if line2d.points[0] == self.center:
P1 = line2d.points[1]
V = line2d.points[0] - line2d.points[1]
else:
P1 = line2d.points[0]
V = line2d.points[1] - line2d.points[0]
a = V.dot(V)
b = 2 * V.dot(P1 - Q)
c = P1.dot(P1) + Q.dot(Q) - 2 * P1.dot(Q) - self.radius ** 2
disc = b ** 2 - 4 * a * c
if math.isclose(disc, 0., abs_tol=tol):
t1 = -b / (2 * a)
return [P1 + t1 * V]
elif disc > 0:
sqrt_disc = math.sqrt(disc)
t1 = (-b + sqrt_disc) / (2 * a)
t2 = (-b - sqrt_disc) / (2 * a)
return [P1 + t1 * V,
P1 + t2 * V]
else:
return []
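    # Worked example (illustrative): for a full arc of radius 1 centred at the
    # origin and a line through (-2, 0) and (2, 0), the quadratic above has
    # a = 16, b = -16 and c = 3, hence disc = 64 and t = 0.25 or 0.75,
    # which maps back to the two intersection points (-1, 0) and (1, 0).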
class ArcEllipse2D(Edge):
"""
"""
def __init__(self, start, interior, end, center, major_dir, name='',
extra=None):
Edge.__init__(self, start, end, name)
self.interior = interior
self.center = center
self.extra = extra
self.major_dir = major_dir
self.minor_dir = self.major_dir.deterministic_unit_normal_vector()
frame = volmdlr.Frame2D(self.center, self.major_dir, self.minor_dir)
start_new, end_new = frame.new_coordinates(
self.start), frame.new_coordinates(self.end)
interior_new, center_new = frame.new_coordinates(
self.interior), frame.new_coordinates(self.center)
#### from : https://math.stackexchange.com/questions/339126/how-to-draw-an-ellipse-if-a-center-and-3-arbitrary-points-on-it-are-given
def theta_A_B(s, i, e,
                      c):  # theta = tilt angle of the ellipse w.r.t. the horizontal (clockwise), A = semi-major axis, B = semi-minor axis
xs, ys, xi, yi, xe, ye = s[0] - c[0], s[1] - c[1], i[0] - c[0], i[
1] - c[1], e[0] - c[0], e[1] - c[1]
A = npy.array(([xs ** 2, ys ** 2, 2 * xs * ys],
[xi ** 2, yi ** 2, 2 * xi * yi],
[xe ** 2, ye ** 2, 2 * xe * ye]))
invA = npy.linalg.inv(A)
One = npy.array(([1],
[1],
[1]))
            C = npy.dot(invA, One)  # column matrix of size 3
theta = 0
c1 = C[0] + C[1]
c2 = (C[1] - C[0]) / math.cos(2 * theta)
gdaxe = math.sqrt((2 / (c1 - c2)))
ptax = math.sqrt((2 / (c1 + c2)))
return theta, gdaxe, ptax
if start == end:
extra_new = frame.new_coordinates(self.extra)
theta, A, B = theta_A_B(start_new, extra_new, interior_new,
center_new)
else:
theta, A, B = theta_A_B(start_new, interior_new, end_new,
center_new)
self.Gradius = A
self.Sradius = B
self.theta = theta
        # Angle for start
u1, u2 = start_new.vector[0] / self.Gradius, start_new.vector[
1] / self.Sradius
angle1 = volmdlr.core.sin_cos_angle(u1, u2)
        # Angle for end
u3, u4 = end_new.vector[0] / self.Gradius, end_new.vector[
1] / self.Sradius
angle2 = volmdlr.core.sin_cos_angle(u3, u4)
        # Angle for interior
u5, u6 = interior_new.vector[0] / self.Gradius, interior_new.vector[
1] / self.Sradius
anglei = volmdlr.core.sin_cos_angle(u5, u6)
# Going trigo/clock wise from start to interior
if anglei < angle1:
trigowise_path = (anglei + volmdlr.TWO_PI) - angle1
clockwise_path = angle1 - anglei
else:
trigowise_path = anglei - angle1
clockwise_path = angle1 - anglei + volmdlr.TWO_PI
        # Going trigo/clock wise from interior to end
if angle2 < anglei:
trigowise_path += (angle2 + volmdlr.TWO_PI) - anglei
clockwise_path += anglei - angle2
else:
trigowise_path += angle2 - anglei
clockwise_path += anglei - angle2 + volmdlr.TWO_PI
if clockwise_path > trigowise_path:
self.is_trigo = True
self.angle = trigowise_path
else:
# Clock wise
self.is_trigo = False
self.angle = clockwise_path
if self.start == self.end or self.angle == 0:
            self.angle = volmdlr.TWO_PI
        if self.is_trigo:  # counterclockwise (trigonometric direction)
self.offset_angle = angle1
else:
self.offset_angle = angle2
def _get_points(self):
return self.polygon_points()
points = property(_get_points)
def polygon_points(self, angle_resolution=40):
number_points_tesselation = math.ceil(
angle_resolution * abs(0.5 * self.angle / math.pi))
frame2d = volmdlr.Frame2D(self.center, self.major_dir, self.minor_dir)
polygon_points_2D = [(volmdlr.Point2D((self.Gradius * math.cos(
self.offset_angle + self.angle * i / (number_points_tesselation)),
self.Sradius * math.sin(
self.offset_angle + self.angle * i / (
number_points_tesselation)))))
for i in
range(number_points_tesselation + 1)]
global_points = []
for pt in polygon_points_2D:
global_points.append(frame2d.old_coordinates(pt))
return global_points
def to_3d(self, plane_origin, x, y):
ps = self.start.to_3d(plane_origin, x, y)
pi = self.interior.to_3d(plane_origin, x, y)
pe = self.end.to_3d(plane_origin, x, y)
pc = self.center.to_3d(plane_origin, x, y)
if self.extra is None:
pextra = None
else:
pextra = self.extra.to_3d(plane_origin, x, y)
if ps == pe:
p3 = pextra
else:
p3 = pe
plane = volmdlr.faces.Plane3D.from_3_points(ps, pi, p3)
n = plane.normal
major_dir = self.major_dir.to_3d(plane_origin, x, y)
major_dir.normalize()
return ArcEllipse3D(ps, pi, pe, pc, major_dir, normal=n,
name=self.name, extra=pextra)
def plot(self, ax=None, color='k', alpha=1):
if ax is None:
_, ax = plt.subplots()
self.interior.plot(ax=ax, color='m')
self.start.plot(ax=ax, color='r')
self.end.plot(ax=ax, color='b')
self.center.plot(ax=ax, color='y')
x = []
y = []
for px, py in self.polygon_points():
x.append(px)
y.append(py)
        ax.plot(x, y, color=color, alpha=alpha)
return ax
class Line3D(Line):
    """
    Define an infinite line passing through the 2 points
    """
    _non_eq_attributes = ['name', 'basis_primitives', 'bounding_box']
def __init__(self, point1: volmdlr.Point3D, point2: volmdlr.Point3D,
name: str = ''):
        Line.__init__(self, point1, point2, name=name)
        self.points = [point1, point2]
        self.bounding_box = self._bounding_box()
def _bounding_box(self):
points = [self.point1, self.point2]
xmin = min([pt[0] for pt in points])
xmax = max([pt[0] for pt in points])
ymin = min([pt[1] for pt in points])
ymax = max([pt[1] for pt in points])
zmin = min([pt[2] for pt in points])
zmax = max([pt[2] for pt in points])
return volmdlr.core.BoundingBox(xmin, xmax, ymin, ymax, zmin, zmax)
def point_at_abscissa(self, curvilinear_abscissa):
return self.point1 + (
self.point2 - self.point1) * curvilinear_abscissa
def point_belongs(self, point3d):
if point3d == self.point1:
v = point3d - self.point2
else:
v = point3d - self.point1
return self.direction_vector().is_colinear_to(v)
def plot(self, ax=None, color='k', alpha=1, dashed=True):
if ax is None:
ax = Axes3D(plt.figure())
# Line segment
x = [self.point1.x, self.point2.x]
y = [self.point1.y, self.point2.y]
z = [self.point1.z, self.point2.z]
ax.plot(x, y, z, color=color, alpha=alpha)
# Drawing 3 times length of segment on each side
u = self.point2 - self.point1
v1 = (self.point1 - 3 * u)
x1, y1, z1 = v1.x, v1.y, v1.z
v2 = (self.point2 - 3 * u)
x2, y2, z2 = v2.x, v2.y, v2.z
if dashed:
ax.plot([x1, x2], [y1, y2], [z1, z2], color=color,
dashes=[30, 5, 10, 5])
else:
ax.plot([x1, x2], [y1, y2], [z1, z2], color=color)
return ax
def plane_projection2d(self, center, x, y):
return Line2D(self.points[0].plane_projection2d(center, x, y),
self.point2.plane_projection2d(center, x, y))
def minimum_distance_points(self, other_line):
"""
Returns the points on this line and the other line that are the closest
of lines
"""
u = self.point2 - self.point1
v = other_line.point2 - other_line.point1
w = self.point1 - other_line.point1
a = u.dot(u)
b = u.dot(v)
c = v.dot(v)
d = u.dot(w)
e = v.dot(w)
s = (b * e - c * d) / (a * c - b ** 2)
t = (a * e - b * d) / (a * c - b ** 2)
p1 = self.point1 + s * u
p2 = other_line.point1 + t * v
return p1, p2
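    # Worked example (illustrative): for this line through (0, 0, 0) and
    # (1, 0, 0) and another line through (0, 1, 1) and (0, 1, 2), the system
    # above gives s = 0 and t = -1, so the closest points are (0, 0, 0) and
    # (0, 1, 0), at distance 1 (the two lines are orthogonal and skew).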
def rotation(self, center, axis, angle, copy=True):
if copy:
return Line3D(*[p.rotation(center, axis, angle, copy=True) for p in
self.points])
else:
for p in self.points:
p.rotation(center, axis, angle, copy=False)
def translation(self, offset, copy=True):
if copy:
return Line3D(
*[p.translation(offset, copy=True) for p in self.points])
else:
for p in self.points:
p.translation(offset, copy=False)
def frame_mapping(self, frame, side, copy=True):
"""
side = 'old' or 'new'
"""
if side == 'old':
if copy:
return Line3D(*[frame.old_coordinates(p) for p in self.points])
else:
for p in self.points:
self.points = [frame.old_coordinates(p) for p in
self.points]
if side == 'new':
if copy:
return Line3D(*[frame.new_coordinates(p) for p in self.points])
else:
for p in self.points:
self.points = [frame.new_coordinates(p) for p in
self.points]
def copy(self):
return Line3D(*[p.copy() for p in self.points])
@classmethod
def from_step(cls, arguments, object_dict):
point1 = object_dict[arguments[1]]
direction = object_dict[arguments[2]]
point2 = point1 + direction
return cls(point1, point2, arguments[0][1:-1])
def intersection(self, line2):
x1 = self.point1.x
y1 = self.point1.y
z1 = self.point1.z
x2 = self.point2.x
y2 = self.point2.y
z2 = self.point2.z
x3 = line2.point1.x
y3 = line2.point1.y
z3 = line2.point1.z
x4 = line2.point2.x
y4 = line2.point2.y
z4 = line2.point2.z
if x3 == 0 and x4 == 0 and y4 - y3 == 0:
x5, y5, z5 = x3, y3, z3
x6, y6, z6 = x4, y4, z4
x3, y3, z3 = x1, y1, z1
x4, y4, z4 = x2, y2, z2
x1, y1, z1 = x5, y5, z5
x2, y2, z2 = x6, y6, z6
elif y3 == 0 and y4 == 0 and x4 - x3 == 0:
x5, y5, z5 = x3, y3, z3
x6, y6, z6 = x4, y4, z4
x3, y3, z3 = x1, y1, z1
x4, y4, z4 = x2, y2, z2
x1, y1, z1 = x5, y5, z5
x2, y2, z2 = x6, y6, z6
res, list_t1 = [], []
        # 3 equations with 2 unknowns t1 and t2
if (x2 - x1 + y1 - y2) != 0 and (y4 - y3) != 0:
t1 = (x3 - x1 + (x4 - x3) * (y1 - y3) / (y4 - y3)) / (
x2 - x1 + y1 - y2)
t2 = (y1 - y3 + (y2 - y1) * t1) / (y4 - y3)
res1 = z1 + (z2 - z1) * t1
res2 = z3 + (z4 - z3) * t2
list_t1.append(t1)
res.append([res1, res2])
if (z2 - z1 + y1 - y2) != 0 and (y4 - y3) != 0:
t1 = (z3 - z1 + (z4 - z3) * (y1 - y3) / (y4 - y3)) / (
z2 - z1 + y1 - y2)
t2 = (y1 - y3 + (y2 - y1) * t1) / (y4 - y3)
res1 = x1 + (x2 - x1) * t1
res2 = x3 + (x4 - x3) * t2
list_t1.append(t1)
res.append([res1, res2])
if (z2 - z1 + x1 - x2) != 0 and (x4 - x3) != 0:
t1 = (z3 - z1 + (z4 - z3) * (x1 - x3) / (x4 - x3)) / (
z2 - z1 + x1 - x2)
t2 = (x1 - x3 + (x2 - x1) * t1) / (x4 - x3)
res1 = y1 + (y2 - y1) * t1
res2 = y3 + (y4 - y3) * t2
list_t1.append(t1)
res.append([res1, res2])
if len(res) == 0:
return None
for pair, t1 in zip(res, list_t1):
res1, res2 = pair[0], pair[1]
if math.isclose(res1, res2,
abs_tol=1e-7): # if there is an intersection point
return volmdlr.Point3D(x1 + (x2 - x1) * t1,
y1 + (y2 - y1) * t1,
z1 + (z2 - z1) * t1)
return None
def to_step(self, current_id):
p1_content, p1_id = self.point1.to_step(current_id)
# p2_content, p2_id = self.point2.to_step(current_id+1)
current_id = p1_id + 1
u_content, u_id = volmdlr.Vector3D.to_step(
self.unit_direction_vector(),
current_id,
vector=True)
current_id = u_id + 1
content = p1_content + u_content
content += "#{} = LINE('{}',#{},#{});\n".format(current_id, self.name,
p1_id, u_id)
return content, current_id
class LineSegment3D(LineSegment):
"""
Define a line segment limited by two points
"""
def __init__(self, start: volmdlr.Point3D, end: volmdlr.Point3D,
name: str = ''):
        LineSegment.__init__(self, start=start, end=end, name=name)
        self.points = [start, end]
        self.bounding_box = self._bounding_box()
def __hash__(self):
return 2 + hash(self.start) + hash(self.end)
def __eq__(self, other_linesegment3d):
if other_linesegment3d.__class__ != self.__class__:
return False
return (self.start == other_linesegment3d.start
and self.end == other_linesegment3d.end)
def _bounding_box(self):
points = [self.start, self.end]
xmin = min(self.start.x, self.end.x)
xmax = max(self.start.x, self.end.x)
ymin = min(self.start.y, self.end.y)
ymax = max(self.start.y, self.end.y)
zmin = min(self.start.z, self.end.z)
zmax = max(self.start.z, self.end.z)
return volmdlr.core.BoundingBox(xmin, xmax, ymin, ymax, zmin, zmax)
def length(self):
return self.end.point_distance(self.start)
def point_at_abscissa(self, curvilinear_abscissa):
return self.start + curvilinear_abscissa * (
self.end - self.start) / self.length()
def normal_vector(self, abscissa=0.):
return None
def unit_normal_vector(self, abscissa=0.):
return None
def middle_point(self):
l = self.length()
return self.point_at_abscissa(0.5 * l)
def plane_projection2d(self, center, x, y):
return LineSegment2D(self.start.plane_projection2d(center, x, y),
self.end.plane_projection2d(center, x, y))
def intersection(self, segment2):
x1 = self.start.x
y1 = self.start.y
z1 = self.start.z
x2 = self.end.x
y2 = self.end.y
z2 = self.end.z
x3 = segment2.start.x
y3 = segment2.start.y
z3 = segment2.start.z
x4 = segment2.end.x
y4 = segment2.end.y
z4 = segment2.end.z
if x3 == 0 and x4 == 0 and y4 - y3 == 0:
x5, y5, z5 = x3, y3, z3
x6, y6, z6 = x4, y4, z4
x3, y3, z3 = x1, y1, z1
x4, y4, z4 = x2, y2, z2
x1, y1, z1 = x5, y5, z5
x2, y2, z2 = x6, y6, z6
elif y3 == 0 and y4 == 0 and x4 - x3 == 0:
x5, y5, z5 = x3, y3, z3
x6, y6, z6 = x4, y4, z4
x3, y3, z3 = x1, y1, z1
x4, y4, z4 = x2, y2, z2
x1, y1, z1 = x5, y5, z5
x2, y2, z2 = x6, y6, z6
res, list_t1 = [], []
        # 3 equations with 2 unknowns t1 and t2; solve them pairwise and check consistency
if (x2 - x1 + y1 - y2) != 0 and (y4 - y3) != 0:
t1 = (x3 - x1 + (x4 - x3) * (y1 - y3) / (y4 - y3)) / (
x2 - x1 + y1 - y2)
t2 = (y1 - y3 + (y2 - y1) * t1) / (y4 - y3)
res1 = z1 + (z2 - z1) * t1
res2 = z3 + (z4 - z3) * t2
list_t1.append(t1)
res.append([res1, res2])
if (z2 - z1 + y1 - y2) != 0 and (y4 - y3) != 0:
t1 = (z3 - z1 + (z4 - z3) * (y1 - y3) / (y4 - y3)) / (
z2 - z1 + y1 - y2)
t2 = (y1 - y3 + (y2 - y1) * t1) / (y4 - y3)
res1 = x1 + (x2 - x1) * t1
res2 = x3 + (x4 - x3) * t2
list_t1.append(t1)
res.append([res1, res2])
if (z2 - z1 + x1 - x2) != 0 and (x4 - x3) != 0:
t1 = (z3 - z1 + (z4 - z3) * (x1 - x3) / (x4 - x3)) / (
z2 - z1 + x1 - x2)
t2 = (x1 - x3 + (x2 - x1) * t1) / (x4 - x3)
res1 = y1 + (y2 - y1) * t1
res2 = y3 + (y4 - y3) * t2
list_t1.append(t1)
res.append([res1, res2])
if len(res) == 0:
return None
for pair, t1 in zip(res, list_t1):
res1, res2 = pair[0], pair[1]
if math.isclose(res1, res2,
abs_tol=1e-7): # if there is an intersection point
                if 0 <= t1 <= 1:
return volmdlr.Point3D(x1 + (x2 - x1) * t1,
y1 + (y2 - y1) * t1,
z1 + (z2 - z1) * t1)
return None
def rotation(self, center, axis, angle, copy=True):
if copy:
return LineSegment3D(
*[p.rotation(center, axis, angle, copy=True) for p in
self.points])
else:
Edge.rotation(self, center, axis, angle, copy=False)
self.bounding_box = self._bounding_box()
def __contains__(self, point):
point1, point2 = self.start, self.end
axis = point2 - point1
test = point.rotation(point1, axis, math.pi)
if test == point:
return True
else:
return False
def translation(self, offset, copy=True):
if copy:
return LineSegment3D(
*[p.translation(offset, copy=True) for p in self.points])
else:
Edge.translation(self, offset, copy=False)
self.bounding_box = self._bounding_box()
def frame_mapping(self, frame, side, copy=True):
"""
side = 'old' or 'new'
"""
if side == 'old':
if copy:
return LineSegment3D(
*[frame.old_coordinates(p) for p in self.points])
else:
Edge.frame_mapping(self, frame, side, copy=False)
self.bounding_box = self._bounding_box()
if side == 'new':
if copy:
return LineSegment3D(
*[frame.new_coordinates(p) for p in self.points])
else:
Edge.frame_mapping(self, frame, side, copy=False)
self.bounding_box = self._bounding_box()
def copy(self):
return LineSegment3D(self.start.copy(), self.end.copy())
def plot(self, ax=None, color='k', alpha=1,
edge_ends=False, edge_direction=False):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
else:
fig = ax.figure
points = [self.start, self.end]
x = [p.x for p in points]
y = [p.y for p in points]
z = [p.z for p in points]
if edge_ends:
ax.plot(x, y, z, color=color, alpha=alpha, marker='o')
else:
ax.plot(x, y, z, color=color, alpha=alpha)
if edge_direction:
x, y, z = self.point_at_abscissa(0.5 * self.length())
u, v, w = 0.05 * self.direction_vector()
ax.quiver(x, y, z, u, v, w, length=0.15 * self.length(),
pivot='tip')
return ax
def plot2d(self, x_3D, y_3D, ax=None, color='k', width=None):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
else:
fig = ax.figure
edge2D = self.plane_projection2d(volmdlr.O3D, x_3D, y_3D)
edge2D.plot(ax=ax, color=color, width=width)
return ax
def plot_data(self, x_3D, y_3D, marker=None, color='black', stroke_width=1,
dash=False, opacity=1, arrow=False):
edge2D = self.plane_projection2d(volmdlr.O3D, x_3D, y_3D)
return edge2D.plot_data(marker, color, stroke_width,
dash, opacity, arrow)
def FreeCADExport(self, name, ndigits=6):
name = 'primitive' + str(name)
x1, y1, z1 = round(1000 * self.start, ndigits)
x2, y2, z2 = round(1000 * self.end, ndigits)
return '{} = Part.LineSegment(fc.Vector({},{},{}),fc.Vector({},{},{}))\n'.format(
name, x1, y1, z1, x2, y2, z2)
def to_line(self):
return Line3D(self.start, self.end)
def babylon_script(self, color=(1, 1, 1), name='line', type_='line',
parent=None):
if type_ == 'line' or type_ == 'dashed':
s = 'var myPoints = [];\n'
s += 'var point1 = new BABYLON.Vector3({},{},{});\n'.format(
*self.start)
s += 'myPoints.push(point1);\n'
s += 'var point2 = new BABYLON.Vector3({},{},{});\n'.format(
*self.end)
s += 'myPoints.push(point2);\n'
if type_ == 'line':
s += 'var {} = BABYLON.MeshBuilder.CreateLines("lines", {{points: myPoints}}, scene);\n'.format(
name)
elif type_ == 'dashed':
s += 'var {} = BABYLON.MeshBuilder.CreateDashedLines("lines", {{points: myPoints, dashNb:20}}, scene);'.format(
name)
s += '{}.color = new BABYLON.Color3{};\n'.format(name,
tuple(color))
elif type_ == 'tube':
radius = 0.03 * self.start.point_distance(self.end)
s = 'var points = [new BABYLON.Vector3({},{},{}), new BABYLON.Vector3({},{},{})];\n'.format(
*self.start, *self.end)
s += 'var {} = BABYLON.MeshBuilder.CreateTube("frame_U", {{path: points, radius: {}}}, {});'.format(
name, radius, parent)
# s += 'line.material = red_material;\n'
else:
raise NotImplementedError
if parent is not None:
s += '{}.parent = {};\n'.format(name, parent)
return s
def to_2d(self, plane_origin, x1, x2):
p2D = [p.to_2d(plane_origin, x1, x2) for p in (self.start, self.end)]
return LineSegment2D(*p2D, name=self.name)
def reverse(self):
return LineSegment3D(self.end.copy(), self.start.copy())
def minimum_distance_points(self, other_line):
"""
Returns the points on this line and the other line that are the closest
of lines
"""
u = self.end - self.start
v = other_line.end - other_line.start
w = self.start - other_line.start
a = u.dot(u)
b = u.dot(v)
c = v.dot(v)
d = u.dot(w)
e = v.dot(w)
if (a * c - b ** 2) != 0:
s = (b * e - c * d) / (a * c - b ** 2)
t = (a * e - b * d) / (a * c - b ** 2)
p1 = self.start + s * u
p2 = other_line.start + t * v
return p1, p2
else:
return self.start, other_line.start
def Matrix_distance(self, other_line):
u = self.direction_vector()
v = other_line.direction_vector()
w = other_line.start - self.start
a = u.dot(u)
b = -u.dot(v)
d = v.dot(v)
e = w.dot(u)
f = -w.dot(v)
A = npy.array([[a, b],
[b, d]])
B = npy.array([e, f])
res = scp.optimize.lsq_linear(A, B, bounds=(0, 1))
p1 = self.point_at_abscissa(res.x[0] * self.length())
p2 = other_line.point_at_abscissa(
res.x[1] * other_line.length())
return p1, p2
def parallele_distance(self, other_linesegment):
ptA, ptB, ptC = self.start, self.end, other_linesegment.points[0]
u = volmdlr.Vector3D((ptA - ptB).vector)
u.normalize()
plane1 = volmdlr.faces.Plane3D.from_3_points(ptA, ptB, ptC)
v = u.cross(plane1.normal) # distance vector
# ptA = k*u + c*v + ptC
res = (ptA - ptC).vector
x, y, z = res[0], res[1], res[2]
u1, u2, u3 = u.vector[0], u.vector[1], u.vector[2]
v1, v2, v3 = v.vector[0], v.vector[1], v.vector[2]
if (u1 * v2 - v1 * u2) != 0 and u1 != 0:
c = (y * u1 - x * u2) / (u1 * v2 - v1 * u2)
k = (x - c * v1) / u1
if math.isclose(k * u3 + c * v3, z, abs_tol=1e-7):
return k
elif (u1 * v3 - v1 * u3) != 0 and u1 != 0:
c = (z * u1 - x * u3) / (u1 * v3 - v1 * u3)
k = (x - c * v1) / u1
if math.isclose(k * u2 + c * v2, y, abs_tol=1e-7):
return k
elif (v1 * u2 - v2 * u1) != 0 and u2 != 0:
c = (u2 * x - y * u1) / (v1 * u2 - v2 * u1)
k = (y - c * v2) / u2
if math.isclose(k * u3 + c * v3, z, abs_tol=1e-7):
return k
elif (v3 * u2 - v2 * u3) != 0 and u2 != 0:
c = (u2 * z - y * u3) / (v3 * u2 - v2 * u3)
k = (y - c * v2) / u2
if math.isclose(k * u1 + c * v1, x, abs_tol=1e-7):
return k
elif (u1 * v3 - v1 * u3) != 0 and u3 != 0:
c = (z * u1 - x * u3) / (u1 * v3 - v1 * u3)
k = (z - c * v3) / u3
if math.isclose(k * u2 + c * v2, y, abs_tol=1e-7):
return k
elif (u2 * v3 - v2 * u3) != 0 and u3 != 0:
c = (z * u2 - y * u3) / (u2 * v3 - v2 * u3)
k = (z - c * v3) / u3
if math.isclose(k * u1 + c * v1, x, abs_tol=1e-7):
return k
else:
            raise NotImplementedError
def minimum_distance(self, element, return_points=False):
if element.__class__ is Arc3D or element.__class__ is volmdlr.wires.Circle3D:
pt1, pt2 = element.minimum_distance_points_line(self)
if return_points:
return pt1.point_distance(pt2), pt1, pt2
else:
return pt1.point_distance(pt2)
elif element.__class__ is LineSegment3D:
p1, p2 = self.Matrix_distance(element)
if return_points:
return p1.point_distance(p2), p1, p2
else:
return p1.point_distance(p2)
elif element.__class__ is BSplineCurve3D:
points = element.points
lines = []
dist_min = math.inf
for p1, p2 in zip(points[0:-1], points[1:]):
lines.append(LineSegment3D(p1, p2))
for l in lines:
p1, p2 = self.Matrix_distance(l)
dist = p1.point_distance(p2)
if dist < dist_min:
dist_min = dist
min_points = (p1, p2)
if return_points:
p1, p2 = min_points
return dist_min, p1, p2
else:
return dist_min
else:
            raise NotImplementedError
def extrusion(self, extrusion_vector):
u = self.unit_direction_vector()
v = extrusion_vector.copy()
v.normalize()
w = u.cross(v)
l1 = self.length()
l2 = extrusion_vector.norm()
# outer_contour = Polygon2D([O2D, Point2D((l1, 0.)),
# Point2D((l1, l2)), Point2D((0., l2))])
plane = volmdlr.faces.Plane3D(volmdlr.Frame3D(self.start, u, v, w))
return [plane.rectangular_cut(0, l1, 0, l2)]
def revolution(self, axis_point, axis, angle):
axis_line3d = Line3D(axis_point, axis_point + axis)
if axis_line3d.point_belongs(self.start) and axis_line3d.point_belongs(self.end):
return []
p1_proj, _ = axis_line3d.point_projection(self.start)
p2_proj, _ = axis_line3d.point_projection(self.end)
d1 = self.start.point_distance(p1_proj)
d2 = self.end.point_distance(p2_proj)
if not math.isclose(d1, 0., abs_tol=1e-9):
            u = (self.start - p1_proj)  # Unit vector from p1_proj to the start point
u.normalize()
elif not math.isclose(d2, 0., abs_tol=1e-9):
            u = (self.end - p2_proj)  # Unit vector from p2_proj to the end point
u.normalize()
else:
return []
if u.is_colinear_to(self.direction_vector()):
# Planar face
v = axis.cross(u)
surface = volmdlr.faces.Plane3D(
volmdlr.Frame3D(p1_proj, u, v, axis))
r, R = sorted([d1, d2])
if angle == volmdlr.TWO_PI:
# Only 2 circles as countours
outer_contour2d = volmdlr.wires.Circle2D(volmdlr.O2D, R)
if not math.isclose(r, 0, abs_tol=1e-9):
inner_contours2d = [volmdlr.wires.Circle2D(volmdlr.O2D, r)]
else:
inner_contours2d = []
else:
inner_contours2d = []
if math.isclose(r, 0, abs_tol=1e-9):
# One arc and 2 lines (pizza slice)
arc2_e = volmdlr.Point2D(R, 0)
arc2_i = arc2_e.rotation(center=volmdlr.O2D,
angle=0.5 * angle)
arc2_s = arc2_e.rotation(center=volmdlr.O2D, angle=angle)
arc2 = Arc2D(arc2_s, arc2_i, arc2_e)
line1 = LineSegment2D(arc2_e, volmdlr.O2D)
line2 = LineSegment2D(volmdlr.O2D, arc2_s)
outer_contour2d = volmdlr.wires.Contour2D([arc2, line1,
line2])
else:
# Two arcs and lines
arc1_s = volmdlr.Point2D(R, 0)
arc1_i = arc1_s.rotation(center=volmdlr.O2D,
angle=0.5 * angle)
arc1_e = arc1_s.rotation(center=volmdlr.O2D, angle=angle)
arc1 = Arc2D(arc1_s, arc1_i, arc1_e)
arc2_e = volmdlr.Point2D(r, 0)
arc2_i = arc2_e.rotation(center=volmdlr.O2D,
angle=0.5 * angle)
arc2_s = arc2_e.rotation(center=volmdlr.O2D, angle=angle)
arc2 = Arc2D(arc2_s, arc2_i, arc2_e)
line1 = LineSegment2D(arc1_e, arc2_s)
line2 = LineSegment2D(arc2_e, arc1_s)
outer_contour2d = volmdlr.wires.Contour2D([arc1, line1,
arc2, line2])
return [volmdlr.faces.PlaneFace3D(surface,
volmdlr.faces.Surface2D(
outer_contour2d,
inner_contours2d))]
elif not math.isclose(d1, d2, abs_tol=1e-9):
# Conical
v = axis.cross(u)
dv = self.direction_vector()
dv.normalize()
semi_angle = math.atan2(dv.dot(u), dv.dot(axis))
cone_origin = p1_proj - d1 / math.tan(semi_angle) * axis
if semi_angle > 0.5 * math.pi:
semi_angle = math.pi - semi_angle
cone_frame = volmdlr.Frame3D(cone_origin, u, -v, -axis)
angle2 = -angle
else:
angle2 = angle
cone_frame = volmdlr.Frame3D(cone_origin, u, v, axis)
surface = volmdlr.faces.ConicalSurface3D(cone_frame,
semi_angle)
z1 = d1 / math.tan(semi_angle)
z2 = d2 / math.tan(semi_angle)
return [surface.rectangular_cut(0, angle2, z1, z2)]
else:
# Cylindrical face
v = axis.cross(u)
surface = volmdlr.faces.CylindricalSurface3D(volmdlr.Frame3D(p1_proj, u, v, axis), d1)
return [surface.rectangular_cut(0, angle,
0, (self.end - self.start).dot(axis))]
def to_step(self, current_id, surface_id=None):
line = self.to_line()
content, line_id = line.to_step(current_id)
if surface_id:
content += "#{} = SURFACE_CURVE('',#{},(#{}),.PCURVE_S1.);\n".format(
line_id + 1, line_id, surface_id)
line_id += 1
current_id = line_id + 1
start_content, start_id = self.start.to_step(current_id, vertex=True)
current_id = start_id + 1
end_content, end_id = self.end.to_step(current_id + 1, vertex=True)
content += start_content + end_content
current_id = end_id + 1
content += "#{} = EDGE_CURVE('{}',#{},#{},#{},.T.);\n".format(
current_id, self.name,
start_id, end_id, line_id)
return content, [current_id]
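# Editor's note: illustrative sketch, not part of the original module; it
# assumes the Euclidean Point3D.point_distance used throughout this file.
def _example_linesegment3d_basics():
    """Minimal sketch: length, midpoint and abscissa on a LineSegment3D."""
    segment = LineSegment3D(volmdlr.Point3D(0, 0, 0), volmdlr.Point3D(3, 0, 0))
    length = segment.length()                  # expected: 3.0
    middle = segment.middle_point()            # expected: Point3D(1.5, 0, 0)
    quarter = segment.point_at_abscissa(0.75)  # expected: Point3D(0.75, 0, 0)
    return length, middle, quarter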
class BSplineCurve3D(Edge):
_non_serializable_attributes = ['curve']
def __init__(self, degree, control_points, knot_multiplicities, knots,
weights=None, periodic=False, name=''):
volmdlr.core.Primitive3D.__init__(self, name=name)
self.control_points = control_points
self.degree = degree
knots = standardize_knot_vector(knots)
self.knots = knots
self.knot_multiplicities = knot_multiplicities
self.weights = weights
self.periodic = periodic
self.name = name
curve = BSpline.Curve()
curve.degree = degree
if weights is None:
P = [(control_points[i][0], control_points[i][1],
control_points[i][2]) for i in range(len(control_points))]
curve.ctrlpts = P
else:
Pw = [(control_points[i][0] * weights[i],
control_points[i][1] * weights[i],
control_points[i][2] * weights[i], weights[i]) for i in
range(len(control_points))]
curve.ctrlptsw = Pw
knot_vector = []
for i, knot in enumerate(knots):
knot_vector.extend([knot] * knot_multiplicities[i])
curve.knotvector = knot_vector
curve.delta = 0.1
curve_points = curve.evalpts
self.curve = curve
self.points = [volmdlr.Point3D(p[0], p[1], p[2]) for p in curve_points]
Edge.__init__(self, start=self.points[0], end=self.points[-1])
def reverse(self):
return self.__class__(degree=self.degree,
control_points=self.control_points[::-1],
knot_multiplicities=self.knot_multiplicities[
::-1],
knots=self.knots[::-1],
weights=self.weights,
periodic=self.periodic)
def length(self):
"""
"""
# length = 0
# for k in range(0, len(self.points) - 1):
# length += (self.points[k] - self.points[k + 1]).norm()
# return length
return length_curve(self.curve)
def point_at_abscissa(self, curvilinear_abscissa):
unit_abscissa = curvilinear_abscissa / self.length()
return volmdlr.Point3D(*self.curve.evaluate_single(unit_abscissa))
# # copy paste from wire3D
# length = 0.
# primitives = []
# for k in range(0, len(self.points) - 1):
# primitives.append(
# LineSegment3D(self.points[k], self.points[k + 1]))
# for primitive in primitives:
# primitive_length = primitive.length()
# if length + primitive_length >= curvilinear_abscissa:
# return primitive.point_at_abscissa(
# curvilinear_abscissa - length)
# length += primitive_length
# # Outside of length
# raise ValueError
def FreeCADExport(self, ip, ndigits=3):
name = 'primitive{}'.format(ip)
points = '['
for i in range(len(self.control_points)):
point = 'fc.Vector({},{},{}),'.format(self.control_points[i][0],
self.control_points[i][1],
self.control_points[i][2])
points += point
points = points[:-1]
points += ']'
        # !!! : what is the last argument of Part.BSplineCurve (False) used for?
        # are the multiplicities the 3rd argument and the knots the 2nd?
return '{} = Part.BSplineCurve({},{},{},{},{},{},{})\n'.format(name,
points,
self.knot_multiplicities,
self.knots,
self.periodic,
self.degree,
self.weights,
False)
@classmethod
def from_step(cls, arguments, object_dict):
name = arguments[0][1:-1]
degree = int(arguments[1])
points = [object_dict[int(i[1:])] for i in arguments[2]]
# curve_form = arguments[3]
if arguments[4] == '.F.':
closed_curve = False
elif arguments[4] == '.T.':
closed_curve = True
else:
raise ValueError
# self_intersect = arguments[5]
knot_multiplicities = [int(i) for i in arguments[6][1:-1].split(",")]
knots = [float(i) for i in arguments[7][1:-1].split(",")]
# knot_spec = arguments[8]
knot_vector = []
for i, knot in enumerate(knots):
knot_vector.extend([knot] * knot_multiplicities[i])
        if len(arguments) > 9:
weight_data = [float(i) for i in arguments[9][1:-1].split(",")]
else:
weight_data = None
# FORCING CLOSED_CURVE = FALSE:
closed_curve = False
return cls(degree, points, knot_multiplicities, knots, weight_data,
closed_curve, name)
def to_step(self, current_id, surface_id=None):
points_ids = []
content = ''
for point in self.points:
point_content, point_id = point.to_step(current_id,
vertex=True)
content += point_content
points_ids.append(point_id)
curve_id = point_id + 1
content += "#{} = B_SPLINE_CURVE_WITH_KNOTS('{}',{},({})," \
".UNSPECIFIED.,.F.,.F.,({}),{}," \
".PIECEWISE_BEZIER_KNOTS.);\n".format(curve_id,
self.name, self.degree, volmdlr.core.step_ids_to_str(points_ids),
volmdlr.core.step_ids_to_str(self.knot_multiplicities),
tuple(self.knots)
)
if surface_id:
content += "#{} = SURFACE_CURVE('',#{},(#{}),.PCURVE_S1.);\n".format(
curve_id + 1, curve_id, surface_id)
curve_id += 1
current_id = curve_id + 1
start_content, start_id = self.start.to_step(current_id, vertex=True)
current_id = start_id + 1
end_content, end_id = self.end.to_step(current_id + 1, vertex=True)
content += start_content + end_content
current_id = end_id + 1
content += "#{} = EDGE_CURVE('{}',#{},#{},#{},.T.);\n".format(
current_id, self.name,
start_id, end_id, curve_id)
return content, [current_id]
def point_distance(self, pt1):
distances = []
for point in self.points:
# vmpt = Point3D((point[1], point[2], point[3]))
distances.append(pt1.point_distance(point))
return min(distances)
def rotation(self, center, axis, angle, copy=True):
new_control_points = [p.rotation(center, axis, angle, True) for p in
self.control_points]
new_BSplineCurve3D = BSplineCurve3D(self.degree, new_control_points,
self.knot_multiplicities,
self.knots, self.weights,
self.periodic, self.name)
if copy:
return new_BSplineCurve3D
else:
self.control_points = new_control_points
self.curve = new_BSplineCurve3D.curve
self.points = new_BSplineCurve3D.points
def translation(self, offset, copy=True):
new_control_points = [p.translation(offset, True) for p in
self.control_points]
new_BSplineCurve3D = BSplineCurve3D(self.degree, new_control_points,
self.knot_multiplicities,
self.knots, self.weights,
self.periodic, self.name)
if copy:
return new_BSplineCurve3D
else:
self.control_points = new_control_points
self.curve = new_BSplineCurve3D.curve
self.points = new_BSplineCurve3D.points
    # Copied and adapted from LineSegment3D.plot
def plot(self, ax=None, edge_ends=False, color='k', alpha=1,
edge_direction=False):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
else:
fig = ax.figure
x = [p.x for p in self.points]
y = [p.y for p in self.points]
z = [p.z for p in self.points]
ax.plot(x, y, z, color=color, alpha=alpha)
if edge_ends:
ax.plot(x, y, z, 'o', color=color, alpha=alpha)
return ax
def to_2d(self, plane_origin, x1, x2):
control_points2D = [p.to_2d(plane_origin, x1, x2) for p in
self.control_points]
return BSplineCurve2D(self.degree, control_points2D,
self.knot_multiplicities, self.knots,
self.weights, self.periodic, self.name)
def polygon_points(self):
return self.points
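# Editor's note: illustrative sketch, not part of the original module. The
# clamped knot vector [0, 0, 0, 1, 1, 1] is an assumption chosen so that the
# degree-2 curve below is a valid single Bezier-like segment.
def _example_bspline_curve3d():
    """Minimal sketch: a degree-2 B-spline through three control points."""
    control_points = [volmdlr.Point3D(0, 0, 0),
                      volmdlr.Point3D(1, 1, 0),
                      volmdlr.Point3D(2, 0, 0)]
    curve = BSplineCurve3D(degree=2, control_points=control_points,
                           knot_multiplicities=[3, 3], knots=[0., 1.])
    # Point halfway along the curve, measured by arc length.
    return curve.point_at_abscissa(0.5 * curve.length())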
class BezierCurve3D(BSplineCurve3D):
def __init__(self, degree: int, control_points: List[volmdlr.Point3D],
name: str = ''):
knotvector = utilities.generate_knot_vector(degree,
len(control_points))
knot_multiplicity = [1] * len(knotvector)
BSplineCurve3D.__init__(self, degree, control_points,
knot_multiplicity, knotvector,
None, False, name)
class Arc3D(Edge):
"""
An arc is defined by a starting point, an end point and an interior point
"""
def __init__(self, start, interior, end, name=''):
"""
"""
self.interior = interior
Edge.__init__(self, start=start, end=end, name=name)
self.setup_arc(start, interior, end, name=name)
@classmethod
def from_angle(cls, start: volmdlr.Point3D, angle: float,
axis_point: volmdlr.Point3D, axis: volmdlr.Vector3D):
start_gen = start
int_gen = start_gen.rotation(axis_point, axis, angle / 2, copy=True)
end_gen = start_gen.rotation(axis_point, axis, angle, copy=True)
        if angle == volmdlr.TWO_PI:
line = Line3D(axis_point, axis_point + axis)
center, _ = line.point_projection(start)
radius = center.point_distance(start)
u = start - center
v = axis.cross(u)
return volmdlr.wires.Circle3D(volmdlr.Frame3D(center, u, v, axis),
radius)
        return cls(start_gen, int_gen, end_gen)
def setup_arc(self, start, interior, end, name=''):
u1 = (self.interior - self.start)
u2 = (self.interior - self.end)
try:
u1.normalize()
u2.normalize()
except ZeroDivisionError:
raise ValueError(
                'Start, end and interior points of an arc must be distinct')
self.normal = u2.cross(u1)
self.normal.normalize()
if u1 == u2:
u2 = self.normal.cross(u1)
u2.normalize()
        v1 = self.normal.cross(u1)  # in-plane vector perpendicular to u1
        v2 = self.normal.cross(u2)  # in-plane vector perpendicular to u2
        p11 = 0.5 * (start + interior)  # midpoint of segment (start, interior)
p12 = p11 + v1
        p21 = 0.5 * (end + interior)  # midpoint of segment (end, interior)
p22 = p21 + v2
l1 = Line3D(p11, p12)
l2 = Line3D(p21, p22)
try:
c1, _ = l1.minimum_distance_points(l2)
except ZeroDivisionError:
raise ValueError(
                'Start, end and interior points of an arc must be distinct')
self.center = c1
self.radius = (self.center - self.start).norm()
# Determining angle
vec1 = (self.start - self.center)
vec1.normalize()
vec2 = self.normal.cross(vec1)
self.frame = volmdlr.Frame3D(self.center, vec1, vec2, self.normal)
r1 = self.start.to_2d(self.center, vec1, vec2)
r2 = self.end.to_2d(self.center, vec1, vec2)
ri = self.interior.to_2d(self.center, vec1, vec2)
angle1 = math.atan2(r1.y, r1.x)
anglei = math.atan2(ri.y, ri.x)
angle2 = math.atan2(r2.y, r2.x)
        # Counterclockwise (trigonometric) and clockwise paths from start to interior
if anglei < angle1:
trigowise_path = (anglei + volmdlr.TWO_PI) - angle1
clockwise_path = angle1 - anglei
else:
trigowise_path = anglei - angle1
clockwise_path = angle1 - anglei + volmdlr.TWO_PI
        # Counterclockwise (trigonometric) and clockwise paths from interior to end
if angle2 < anglei:
trigowise_path += (angle2 + volmdlr.TWO_PI) - anglei
clockwise_path += anglei - angle2
else:
trigowise_path += angle2 - anglei
clockwise_path += anglei - angle2 + volmdlr.TWO_PI
if clockwise_path > trigowise_path:
self.is_trigo = True
self.angle = trigowise_path
else:
# Clock wise
self.is_trigo = False
self.angle = clockwise_path
# if self.angle > math.pi:
# # Inverting normal to be sure to have a right defined normal for rotation
# self.normal = -self.normal
@property
def points(self):
return [self.start, self.interior, self.end]
def reverse(self):
return self.__class__(self.end.copy(),
self.interior.copy(),
self.start.copy())
def polygon_points(self, angle_resolution=40):
number_points = int(angle_resolution * self.angle + 1)
l = self.length()
polygon_points_3D = [self.point_at_abscissa(
l * i / (number_points)) for i in
range(number_points + 1)]
return polygon_points_3D
def length(self):
return self.radius * abs(self.angle)
def point_at_abscissa(self, curvilinear_abscissa):
return self.start.rotation(self.center, self.normal,
curvilinear_abscissa / self.radius,
copy=True)
def unit_direction_vector(self, abscissa):
theta = abscissa / self.radius
t0 = self.normal.cross(self.start - self.center)
t0.normalize()
tangent = t0.rotation(self.center, self.normal, theta, copy=True)
return tangent
def unit_normal_vector(self, abscissa):
return self.normal.cross(self.unit_direction_vector(abscissa))
def normal_vector(self, abscissa):
return self.normal.cross(self.unit_direction_vector(abscissa))
def rotation(self, rot_center, axis, angle, copy=True):
if copy:
new_start = self.start.rotation(rot_center, axis, angle, True)
new_interior = self.interior.rotation(rot_center, axis, angle,
True)
new_end = self.end.rotation(rot_center, axis, angle, True)
return Arc3D(new_start, new_interior, new_end, name=self.name)
else:
self.center.rotation(rot_center, axis, angle, False)
self.start.rotation(rot_center, axis, angle, False)
self.interior.rotation(rot_center, axis, angle, False)
self.end.rotation(rot_center, axis, angle, False)
[p.rotation(rot_center, axis, angle, False) for p in
self.primitives]
def translation(self, offset, copy=True):
if copy:
new_start = self.start.translation(offset, True)
new_interior = self.interior.translation(offset, True)
new_end = self.end.translation(offset, True)
return Arc3D(new_start, new_interior, new_end, name=self.name)
else:
self.center.translation(offset, False)
self.start.translation(offset, False)
self.interior.translation(offset, False)
self.end.translation(offset, False)
[p.translation(offset, False) for p in self.primitives]
def plot(self, ax=None, color='k', alpha=1,
edge_ends=False, edge_direction=False):
if ax is None:
fig = plt.figure()
ax = Axes3D(fig)
else:
fig = None
# if plot_points:
# ax.plot([self.interior[0]], [self.interior[1]], [self.interior[2]],
# color='b')
# ax.plot([self.start[0]], [self.start[1]], [self.start[2]], c='r')
# ax.plot([self.end[0]], [self.end[1]], [self.end[2]], c='r')
# ax.plot([self.interior[0]], [self.interior[1]], [self.interior[2]],
# c='g')
x = []
y = []
z = []
for px, py, pz in self.polygon_points():
x.append(px)
y.append(py)
z.append(pz)
ax.plot(x, y, z, color=color, alpha=alpha)
if edge_ends:
self.start.plot(ax=ax)
self.end.plot(ax=ax)
if edge_direction:
x, y, z = self.point_at_abscissa(0.5 * self.length())
u, v, w = 0.05 * self.unit_direction_vector(0.5 * self.length())
ax.quiver(x, y, z, u, v, w, length=0.1,
arrow_length_ratio=0.01, normalize=True)
return ax
def plot2d(self, center=volmdlr.O3D,
x3d=volmdlr.X3D, y3D=volmdlr.Y3D,
ax=None, color='k'):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
else:
fig = ax.figure
# TODO: Enhance this plot
l = self.length()
x = []
y = []
for i in range(30):
p = self.point_at_abscissa(i / (29.) * l)
xi, yi = p.plane_projection2d(center, volmdlr.X3D, volmdlr.Y3D)
x.append(xi)
y.append(yi)
ax.plot(x, y, color=color)
return ax
def FreeCADExport(self, name, ndigits=6):
xs, ys, zs = round(1000 * self.start, ndigits)
xi, yi, zi = round(1000 * self.interior, ndigits)
xe, ye, ze = round(1000 * self.end, ndigits)
return '{} = Part.Arc(fc.Vector({},{},{}),fc.Vector({},{},{}),fc.Vector({},{},{}))\n' \
.format(name, xs, ys, zs, xi, yi, zi, xe, ye, ze)
def copy(self):
return Arc3D(self.start.copy(), self.interior.copy(), self.end.copy())
def frame_mapping(self, frame, side, copy=True):
"""
side = 'old' or 'new'
"""
if side == 'old':
new_start = frame.old_coordinates(self.start.copy())
new_interior = frame.old_coordinates(self.interior.copy())
new_end = frame.old_coordinates(self.end.copy())
if copy:
                return Arc3D(new_start, new_interior, new_end,
                             name=self.name)
else:
self.start, self.interior, self.end = new_start, new_interior, new_end
self.setup_arc(self.start, self.interior, self.end)
if side == 'new':
new_start = frame.new_coordinates(self.start.copy())
new_interior = frame.new_coordinates(self.interior.copy())
new_end = frame.new_coordinates(self.end.copy())
if copy:
                return Arc3D(new_start, new_interior, new_end,
                             name=self.name)
else:
self.start, self.interior, self.end = new_start, new_interior, new_end
self.setup_arc(self.start, self.interior, self.end)
def abscissa(self, point3d: volmdlr.Point3D):
x, y, z = self.frame.new_coordinates(point3d)
u1 = x / self.radius
u2 = y / self.radius
theta = volmdlr.core.sin_cos_angle(u1, u2)
return self.radius * abs(theta)
def split(self, split_point: volmdlr.Point3D):
abscissa = self.abscissa(split_point)
return [Arc3D(self.start,
self.point_at_abscissa(0.5 * abscissa),
split_point),
Arc3D(split_point,
self.point_at_abscissa(1.5 * abscissa),
self.end)
]
def to_2d(self, plane_origin, x, y):
ps = self.start.to_2d(plane_origin, x, y)
pi = self.interior.to_2d(plane_origin, x, y)
pe = self.end.to_2d(plane_origin, x, y)
return Arc2D(ps, pi, pe, name=self.name)
def minimum_distance_points_arc(self, other_arc):
u1 = self.start - self.center
u1.normalize()
u2 = self.normal.cross(u1)
w = other_arc.center - self.center
u3 = other_arc.start - other_arc.center
u3.normalize()
u4 = other_arc.normal.cross(u3)
r1, r2 = self.radius, other_arc.radius
a, b, c, d = u1.dot(u1), u1.dot(u2), u1.dot(u3), u1.dot(u4)
e, f, g = u2.dot(u2), u2.dot(u3), u2.dot(u4)
h, i = u3.dot(u3), u3.dot(u4)
j = u4.dot(u4)
k, l, m, n, o = w.dot(u1), w.dot(u2), w.dot(u3), w.dot(u4), w.dot(w)
def distance_squared(x):
return (a * ((math.cos(x[0])) ** 2) * r1 ** 2 + e * (
(math.sin(x[0])) ** 2) * r1 ** 2
+ o + h * ((math.cos(x[1])) ** 2) * r2 ** 2 + j * (
(math.sin(x[1])) ** 2) * r2 ** 2
+ b * math.sin(2 * x[0]) * r1 ** 2 - 2 * r1 * math.cos(
x[0]) * k
- 2 * r1 * r2 * math.cos(x[0]) * math.cos(x[1]) * c
- 2 * r1 * r2 * math.cos(x[0]) * math.sin(
x[1]) * d - 2 * r1 * math.sin(x[0]) * l
- 2 * r1 * r2 * math.sin(x[0]) * math.cos(x[1]) * f
- 2 * r1 * r2 * math.sin(x[0]) * math.sin(
x[1]) * g + 2 * r2 * math.cos(x[1]) * m
+ 2 * r2 * math.sin(x[1]) * n + i * math.sin(
2 * x[1]) * r2 ** 2)
x01 = npy.array([self.angle / 2, other_arc.angle / 2])
res1 = scp.optimize.least_squares(distance_squared, x01,
bounds=[(0, 0), (
self.angle, other_arc.angle)])
p1 = self.point_at_abscissa(res1.x[0] * r1)
p2 = other_arc.point_at_abscissa(res1.x[1] * r2)
return p1, p2
def minimum_distance_points_line(self, other_line):
u = other_line.direction_vector()
k = self.start - self.center
k.normalize()
w = self.center - other_line.start
v = self.normal.cross(k)
r = self.radius
a = u.dot(u)
b = u.dot(v)
c = u.dot(k)
d = v.dot(v)
e = v.dot(k)
f = k.dot(k)
g = w.dot(u)
h = w.dot(v)
i = w.dot(k)
j = w.dot(w)
# x = (s, theta)
def distance_squared(x):
return (a * x[0] ** 2 + j + d * (
(math.sin(x[1])) ** 2) * r ** 2 + f * (
(math.cos(x[1])) ** 2) * r ** 2
- 2 * x[0] * g - 2 * x[0] * r * math.sin(x[1]) * b - 2 * x[
0] * r * math.cos(x[1]) * c
+ 2 * r * math.sin(x[1]) * h + 2 * r * math.cos(x[1]) * i
+ math.sin(2 * x[1]) * e * r ** 2)
x01 = npy.array([0.5, self.angle / 2])
x02 = npy.array([0.5, 0])
x03 = npy.array([0.5, self.angle])
res1 = scp.optimize.least_squares(distance_squared, x01,
bounds=[(0, 0), (1, self.angle)])
res2 = scp.optimize.least_squares(distance_squared, x02,
bounds=[(0, 0), (1, self.angle)])
res3 = scp.optimize.least_squares(distance_squared, x03,
bounds=[(0, 0), (1, self.angle)])
p1 = other_line.point_at_abscissa(
res1.x[0] * other_line.length())
p2 = self.point_at_abscissa(res1.x[1] * r)
        dmin = p1.point_distance(p2)
        res = [res2, res3]
        for couple in res:
            ptest1 = other_line.point_at_abscissa(
                couple.x[0] * other_line.length())
            ptest2 = self.point_at_abscissa(couple.x[1] * r)
            dtest = ptest1.point_distance(ptest2)
            if dtest < dmin:
                p1, p2 = ptest1, ptest2
                dmin = dtest
return p1, p2
def minimum_distance(self, element, return_points=False):
if element.__class__ is Arc3D or element.__class__.__name__ == 'Circle3D':
p1, p2 = self.minimum_distance_points_arc(element)
if return_points:
return p1.point_distance(p2), p1, p2
else:
return p1.point_distance(p2)
elif element.__class__ is LineSegment3D:
pt1, pt2 = self.minimum_distance_points_line(element)
if return_points:
return pt1.point_distance(pt2), pt1, pt2
else:
return pt1.point_distance(pt2)
else:
            raise NotImplementedError
def extrusion(self, extrusion_vector):
if self.normal.is_colinear_to(extrusion_vector):
u = self.start - self.center
u.normalize()
w = extrusion_vector.copy()
w.normalize()
v = w.cross(u)
arc2d = self.to_2d(self.center, u, v)
angle1, angle2 = arc2d.angle1, arc2d.angle2
if angle2 < angle1:
angle2 += volmdlr.TWO_PI
cylinder = volmdlr.faces.CylindricalSurface3D(volmdlr.Frame3D(self.center,
u,
v,
w),
self.radius
)
return [cylinder.rectangular_cut(angle1,
angle2,
0, extrusion_vector.norm())]
else:
raise NotImplementedError(
'Elliptic faces not handled: dot={}'.format(
self.normal.dot(extrusion_vector)
))
def revolution(self, axis_point: volmdlr.Point3D, axis: volmdlr.Vector3D,
angle: float):
line3d = Line3D(axis_point, axis_point + axis)
tore_center, _ = line3d.point_projection(self.center)
if math.isclose(tore_center.point_distance(self.center), 0.,
abs_tol=1e-9):
# Sphere
start_p, _ = line3d.point_projection(self.start)
u = self.start - start_p
if math.isclose(u.norm(), 0, abs_tol=1e-9):
end_p, _ = line3d.point_projection(self.end)
u = self.end - end_p
if math.isclose(u.norm(), 0, abs_tol=1e-9):
interior_p, _ = line3d.point_projection(self.interior)
u = self.interior - interior_p
u.normalize()
v = axis.cross(u)
arc2d = self.to_2d(self.center, u, axis)
surface = volmdlr.faces.SphericalSurface3D(
volmdlr.Frame3D(self.center, u, v, axis), self.radius)
surface.plot()
return [surface.rectangular_cut(0, angle,
arc2d.angle1, arc2d.angle2)]
else:
# Toroidal
u = self.center - tore_center
u.normalize()
v = axis.cross(u)
if not math.isclose(self.normal.dot(u), 0., abs_tol=1e-9):
raise NotImplementedError(
'Outside of plane revolution not supported')
R = tore_center.point_distance(self.center)
surface = volmdlr.faces.ToroidalSurface3D(
volmdlr.Frame3D(tore_center, u, v, axis), R,
self.radius)
arc2d = self.to_2d(tore_center, u, axis)
return [surface.rectangular_cut(0, angle,
arc2d.angle1, arc2d.angle2)]
def to_step(self, current_id):
if self.angle >= math.pi:
l = self.length()
arc1, arc2 = self.split(self.point_at_abscissa(0.33 * l))
arc2, arc3 = arc2.split(self.point_at_abscissa(0.66 * l))
content, arcs1_id = arc1.to_step_without_splitting(current_id)
arc2_content, arcs2_id = arc2.to_step_without_splitting(
arcs1_id[0] + 1)
arc3_content, arcs3_id = arc3.to_step_without_splitting(
arcs2_id[0] + 1)
content += arc2_content + arc3_content
return content, [arcs1_id[0], arcs2_id[0], arcs3_id[0]]
else:
return self.to_step_without_splitting(current_id)
def to_step_without_splitting(self, current_id, surface_id=None):
u = self.start - self.center
u.normalize()
v = self.normal.cross(u)
frame = volmdlr.Frame3D(self.center, self.normal, u, v)
content, frame_id = frame.to_step(current_id)
curve_id = frame_id + 1
content += "#{} = CIRCLE('{}', #{}, {:.6f});\n".format(curve_id, self.name,
frame_id,
self.radius * 1000,
)
if surface_id:
content += "#{} = SURFACE_CURVE('',#{},(#{}),.PCURVE_S1.);\n".format(
curve_id + 1, curve_id, surface_id)
curve_id += 1
current_id = curve_id + 1
start_content, start_id = self.start.to_step(current_id, vertex=True)
end_content, end_id = self.end.to_step(start_id + 1, vertex=True)
content += start_content + end_content
current_id = end_id + 1
content += "#{} = EDGE_CURVE('{}',#{},#{},#{},.T.);\n".format(
current_id, self.name,
start_id, end_id, curve_id)
return content, [current_id]
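# Editor's note: illustrative sketch, not part of the original module. The
# three points below lie on the unit circle in the XY plane, so the resulting
# arc should have radius ~1 and angle ~pi (a half circle).
def _example_arc3d_half_circle():
    """Minimal sketch: an Arc3D from start, interior and end points."""
    arc = Arc3D(volmdlr.Point3D(1, 0, 0),
                volmdlr.Point3D(0, 1, 0),
                volmdlr.Point3D(-1, 0, 0))
    return arc.length()  # expected: close to math.pi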
class FullArc3D(Edge):
"""
    An edge that starts and ends at the same point (start_end) after having
    described a full circle
"""
def __init__(self, center: volmdlr.Point3D, start_end: volmdlr.Point3D,
normal: volmdlr.Vector3D,
name: str = ''):
self.center = center
self.normal = normal
self.radius = center.point_distance(start_end)
self.angle = volmdlr.TWO_PI
Edge.__init__(self, start_end, start_end,
name=name) # !!! this is dangerous
def __hash__(self):
        return hash(self.center) + 5 * hash(self.start)
def __eq__(self, other_arc):
return (self.center == other_arc.center) \
and (self.start == other_arc.start)
def to_2d(self, plane_origin, x1, x2):
center = self.center.to_2d(plane_origin, x1, x2)
start_end = self.start.to_2d(plane_origin, x1, x2)
return FullArc2D(center, start_end)
def length(self):
return volmdlr.TWO_PI * self.radius
def point_at_abscissa(self, abscissa):
angle = abscissa / self.radius
return self.start.rotation(self.center, self.normal, angle)
def unit_direction_vector(self, curvilinear_abscissa):
theta = curvilinear_abscissa / self.radius
t0 = self.normal.cross(self.start - self.center)
t0.normalize()
tangent = t0.rotation(self.center, self.normal, theta, copy=True)
return tangent
def polygon_points(self, angle_resolution=10):
npoints = int(angle_resolution * volmdlr.TWO_PI) + 2
polygon_points_3D = [self.start.rotation(self.center,
self.normal,
volmdlr.TWO_PI / (
npoints - 1) * i
) \
for i in range(npoints)]
return polygon_points_3D
def to_step(self, current_id, surface_id=None):
# Not calling Circle3D.to_step because of circular imports
u = self.start - self.center
u.normalize()
v = self.normal.cross(u)
frame = volmdlr.Frame3D(self.center, self.normal, u, v)
content, frame_id = frame.to_step(current_id)
curve_id = frame_id + 1
# Not calling Circle3D.to_step because of circular imports
content += "#{} = CIRCLE('{}',#{},{:.6f});\n".format(curve_id, self.name,
frame_id,
self.radius * 1000,
)
if surface_id:
content += "#{} = SURFACE_CURVE('',#{},(#{}),.PCURVE_S1.);\n".format(
curve_id + 1, curve_id, surface_id)
curve_id += 1
p1 = (self.center + u * self.radius).to_point()
# p2 = self.center + v*self.radius
# p3 = self.center - u*self.radius
# p4 = self.center - v*self.radius
p1_content, p1_id = p1.to_step(curve_id + 1, vertex=True)
content += p1_content
# p2_content, p2_id = p2.to_step(p1_id+1, vertex=True)
# p3_content, p3_id = p3.to_step(p2_id+1, vertex=True)
# p4_content, p4_id = p4.to_step(p3_id+1, vertex=True)
# content += p1_content + p2_content + p3_content + p4_content
# arc1_id = p4_id + 1
# content += "#{} = EDGE_CURVE('{}',#{},#{},#{},.T.);\n".format(arc1_id, self.name,
# p1_id, p2_id,
# circle_id)
# arc2_id = arc1_id + 1
# content += "#{} = EDGE_CURVE('{}',#{},#{},#{},.T.);\n".format(arc2_id, self.name,
# p2_id, p3_id,
# circle_id)
# arc3_id = arc2_id + 1
# content += "#{} = EDGE_CURVE('{}',#{},#{},#{},.T.);\n".format(arc3_id, self.name,
# p3_id, p4_id,
# circle_id)
# arc4_id = arc3_id + 1
# content += "#{} = EDGE_CURVE('{}',#{},#{},#{},.T.);\n".format(arc4_id, self.name,
# p4_id, p1_id,
# circle_id)
edge_curve = p1_id + 1
content += "#{} = EDGE_CURVE('{}',#{},#{},#{},.T.);\n".format(
edge_curve, self.name,
p1_id, p1_id,
curve_id)
curve_id += 1
# return content, [arc1_id, arc2_id, arc3_id, arc4_id]
return content, [edge_curve]
def plot(self, ax=None, color='k', alpha=1., edge_ends=False,
edge_direction=False):
if ax is None:
fig = plt.figure()
ax = Axes3D(fig)
x = []
y = []
z = []
for px, py, pz in self.polygon_points():
x.append(px)
y.append(py)
z.append(pz)
x.append(x[0])
y.append(y[0])
z.append(z[0])
ax.plot(x, y, z, color=color, alpha=alpha)
if edge_ends:
self.start.plot(ax=ax)
self.end.plot(ax=ax)
if edge_direction:
s = 0.5 * self.length()
x, y, z = self.point_at_abscissa(s)
tangent = self.unit_direction_vector(s)
arrow_length = 0.15 * s
ax.quiver(x, y, z, *arrow_length * tangent,
pivot='tip')
return ax
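# Editor's note: illustrative sketch, not part of the original module; it
# assumes volmdlr.O3D and volmdlr.Z3D exist, as X3D/Y3D/O3D are used elsewhere
# in this file.
def _example_full_arc3d():
    """Minimal sketch: a full circle of radius 2 around the Z axis."""
    full_arc = FullArc3D(center=volmdlr.O3D,
                         start_end=volmdlr.Point3D(2, 0, 0),
                         normal=volmdlr.Z3D)
    return full_arc.length()  # expected: 2 * math.pi * 2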
class ArcEllipse3D(Edge):
"""
    An arc of an ellipse, defined by a starting point, an end point and an interior point
"""
    def __init__(self, start, interior, end, center, major_dir,
                 name='', extra=None):
        # extra is an additional point, needed when start == end, because three
        # distinct points on the arc-ellipse are required to define it
Edge.__init__(self, start=start, end=end, name=name)
self.interior = interior
self.center = center
major_dir.normalize()
self.major_dir = major_dir # Vector for Gradius
        self.extra = extra
u1 = (self.interior - self.start)
u2 = (self.interior - self.end)
u1.normalize()
u2.normalize()
if u1 == u2:
u2 = (self.interior - self.extra)
u2.normalize()
# if normal is None:
n = u2.cross(u1)
n.normalize()
self.normal = n
# else:
# n = normal
# n.normalize()
# self.normal = normal
self.minor_dir = self.normal.cross(self.major_dir)
frame = volmdlr.Frame3D(self.center, self.major_dir, self.minor_dir,
self.normal)
start_new, end_new = frame.new_coordinates(
self.start), frame.new_coordinates(self.end)
interior_new, center_new = frame.new_coordinates(
self.interior), frame.new_coordinates(self.center)
#### from : https://math.stackexchange.com/questions/339126/how-to-draw-an-ellipse-if-a-center-and-3-arbitrary-points-on-it-are-given
        def theta_A_B(s, i, e,
                      c):  # theta = tilt angle of the ellipse w.r.t. the horizontal (clockwise); A = semi-major axis, B = semi-minor axis
xs, ys, xi, yi, xe, ye = s[0] - c[0], s[1] - c[1], i[0] - c[0], i[
1] - c[1], e[0] - c[0], e[1] - c[1]
A = npy.array(([xs ** 2, ys ** 2, 2 * xs * ys],
[xi ** 2, yi ** 2, 2 * xi * yi],
[xe ** 2, ye ** 2, 2 * xe * ye]))
invA = npy.linalg.inv(A)
One = npy.array(([1],
[1],
[1]))
            C = npy.dot(invA, One)  # column matrix of size 3
theta = 0.5 * math.atan(2 * C[2] / (C[1] - C[0]))
c1 = C[0] + C[1]
c2 = (C[1] - C[0]) / math.cos(2 * theta)
gdaxe = math.sqrt((2 / (c1 - c2)))
ptax = math.sqrt((2 / (c1 + c2)))
return theta, gdaxe, ptax
if start == end:
extra_new = frame.new_coordinates(self.extra)
theta, A, B = theta_A_B(start_new, extra_new, interior_new,
center_new)
else:
theta, A, B = theta_A_B(start_new, interior_new, end_new,
center_new)
self.Gradius = A
self.Sradius = B
self.theta = theta
        # Angle for start
u1, u2 = start_new.vector[0] / self.Gradius, start_new.vector[
1] / self.Sradius
angle1 = volmdlr.sin_cos_angle(u1, u2)
        # Angle for end
u3, u4 = end_new.vector[0] / self.Gradius, end_new.vector[
1] / self.Sradius
angle2 = volmdlr.sin_cos_angle(u3, u4)
        # Angle for interior
u5, u6 = interior_new.vector[0] / self.Gradius, interior_new.vector[
1] / self.Sradius
anglei = volmdlr.sin_cos_angle(u5, u6)
        # Counterclockwise (trigonometric) and clockwise paths from start to interior
if anglei < angle1:
trigowise_path = (anglei + volmdlr.TWO_PI) - angle1
clockwise_path = angle1 - anglei
else:
trigowise_path = anglei - angle1
clockwise_path = angle1 - anglei + volmdlr.TWO_PI
        # Counterclockwise (trigonometric) and clockwise paths from interior to end
if angle2 < anglei:
trigowise_path += (angle2 + volmdlr.TWO_PI) - anglei
clockwise_path += anglei - angle2
else:
trigowise_path += angle2 - anglei
clockwise_path += anglei - angle2 + volmdlr.TWO_PI
if clockwise_path > trigowise_path:
self.is_trigo = True
self.angle = trigowise_path
else:
# Clock wise
self.is_trigo = False
self.angle = clockwise_path
if self.start == self.end:
self.angle = volmdlr.TWO_PI
if self.is_trigo:
self.offset_angle = angle1
else:
self.offset_angle = angle2
volmdlr.core.Primitive3D.__init__(self,
basis_primitives=self.polygon_points(),
name=name)
def _get_points(self):
return self.polygon_points()
points = property(_get_points)
def polygon_points(self, resolution_for_ellipse=40):
number_points_tesselation = math.ceil(
resolution_for_ellipse * abs(0.5 * self.angle / math.pi))
plane3d = volmdlr.faces.Plane3D(self.center, self.major_dir,
self.minor_dir,
self.normal)
frame3d = volmdlr.Frame3D(self.center, plane3d.vectors[0],
plane3d.vectors[1],
plane3d.normal)
polygon_points_3D = [volmdlr.Point3D((self.Gradius * math.cos(
self.offset_angle + self.angle * i / (number_points_tesselation)),
self.Sradius * math.sin(
self.offset_angle + self.angle * i / (
number_points_tesselation)),
0)) for i in
range(number_points_tesselation + 1)]
global_points = []
for pt in polygon_points_3D:
global_points.append(frame3d.old_coordinates(pt))
return global_points
def to_2d(self, plane_origin, x, y):
ps = self.start.to_2d(plane_origin, x, y)
pi = self.interior.to_2d(plane_origin, x, y)
pe = self.end.to_2d(plane_origin, x, y)
center = self.center.to_2d(plane_origin, x, y)
if self.extra is None:
pextra = None
else:
pextra = self.extra.to_2d(plane_origin, x, y)
maj_dir2d = self.major_dir.to_2d(plane_origin, x, y)
maj_dir2d.normalize()
return ArcEllipse2D(ps, pi, pe, center, maj_dir2d, name=self.name,
extra=pextra)
def length(self):
return self.angle * math.sqrt(
(self.Gradius ** 2 + self.Sradius ** 2) / 2)
def plot(self, ax=None):
if ax is None:
fig = plt.figure()
ax = Axes3D(fig)
else:
fig = None
ax.plot([self.interior[0]], [self.interior[1]], [self.interior[2]],
color='b')
ax.plot([self.start[0]], [self.start[1]], [self.start[2]], c='r')
ax.plot([self.end[0]], [self.end[1]], [self.end[2]], c='r')
ax.plot([self.interior[0]], [self.interior[1]], [self.interior[2]],
c='g')
x = []
y = []
z = []
for px, py, pz in self.polygon_points():
x.append(px)
y.append(py)
z.append(pz)
ax.plot(x, y, z, 'k')
return ax
def plot2d(self, x3d, y3D, ax, color='k'):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
else:
fig = ax.figure
# TODO: Enhance this plot
l = self.length()
x = []
y = []
for i in range(30):
p = self.point_at_abscissa(i / (29.) * l)
            xi, yi = p.plane_projection2d(volmdlr.O3D, volmdlr.X3D, volmdlr.Y3D)
x.append(xi)
y.append(yi)
ax.plot(x, y, color=color)
return ax
def FreeCADExport(self, name, ndigits=6):
xs, ys, zs = round(1000 * self.start, ndigits).vector
xi, yi, zi = round(1000 * self.interior, ndigits).vector
xe, ye, ze = round(1000 * self.end, ndigits).vector
return '{} = Part.Arc(fc.Vector({},{},{}),fc.Vector({},{},{}),fc.Vector({},{},{}))\n'.format(
name, xs, ys, zs, xi, yi, zi, xe, ye, ze)
| gpl-3.0 |
jmschrei/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 77 | 3825 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
from sklearn.covariance import fast_mcd
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def test_fast_mcd_on_invalid_input():
X = np.arange(100)
assert_raise_message(ValueError, 'fast_mcd expects at least 2 samples',
fast_mcd, X)
def test_mcd_class_on_invalid_input():
X = np.arange(100)
mcd = MinCovDet()
assert_raise_message(ValueError, 'MinCovDet expects at least 2 samples',
mcd.fit, X)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
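# Editor's note (illustrative, not part of the original test suite): a fitted
# MinCovDet exposes the attributes exercised above, e.g.
#     mcd = MinCovDet(random_state=0).fit(data)
#     mcd.location_, mcd.covariance_, mcd.support_, mcd.dist_
# and mcd.mahalanobis(data) recomputes the distances stored in dist_.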
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
cybernet14/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = np.floor(n_samples * missing_rate)
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
loindutroupeau/courbes_vaccinales | bcg_europe.py | 1 | 3436 | #!/usr/bin/python2.7
# coding: utf-8
import matplotlib.pyplot as plt
import xlrd
import numpy
import math
import gestion_figures
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
fichier = u"../Données_recueillies/BCG_Europe.xls"
classeur = xlrd.open_workbook( fichier )
nom_des_feuilles = classeur.sheet_names()
numFeuille = 0
feuillePays = classeur.sheet_by_name( nom_des_feuilles[numFeuille] )
Lignes = enum( 'PAYS', 'PAYS_TRADUCTION', 'TAUX_INCIDENCE', 'TAUX_INCIDENCE_ENFANTS', 'RATIO_ADULTES_ENFANTS',
'COUVERTURE_MIN', 'COUVERTURE_MAX', 'ANNEE', 'PROGRAMME_A_SUIVRE' )
def getCouv( couvStr ):
if couvStr == "n/a":
couv = numpy.nan
elif couvStr == "-":
couv = -5
else:
couv = float(couvStr[0:-1])
return couv
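# Editor's note (illustrative examples of the parsing above):
#     getCouv("85%")  -> 85.0
#     getCouv("n/a")  -> nan  (no data)
#     getCouv("-")    -> -5   (no systematic BCG programme)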
# iterate over the sheet as long as there are countries
nomPays = 'vide'
numCol = 2
cell = feuillePays.cell_value
nomPays = cell( numCol, Lignes.PAYS )
nom_pays = []
incidence = []
incidence_enfants = []
min_couverture = []
max_couverture = []
bcg = {}
while nomPays != 'FIN':
nom_pays.append( nomPays )
_incidence = float(cell(numCol,Lignes.TAUX_INCIDENCE))
incidence.append( _incidence )
min_couv = getCouv( cell( numCol,Lignes.COUVERTURE_MIN) )
max_couv = getCouv( cell( numCol,Lignes.COUVERTURE_MAX) )
min_couverture.append( min_couv )
max_couverture.append( max_couv )
bcg[nomPays] = [_incidence, min_couv, max_couv]
numCol += 1
nomPays = cell( numCol, Lignes.PAYS_TRADUCTION )
# create and set up the figure
sources = [u"Euro Surveill 2006;11(3): 6-11", "(http://opac.invs.sante.fr/doc_num.php?explnum_id=4827)"]
fig = gestion_figures.FigureVaccination( 12, 6.4, sources, False )
plt.xlabel( u"Couverture du vaccin BCG (%)\n[intervalle si incertitude ; négatif si pas d'utilisation systématique]" )
plt.ylabel( u'Incidence (taux pour 100.000)' )
plt.annotate( u"Couverture BCG en 2003 et incidence selon les pays d'Europe",
(0.5, 0.94), xycoords='figure fraction', ha='center', fontsize=14 )
fig.get().subplots_adjust(bottom=0.2)
# draw arrows to point out selected countries
decalage_fleches = {}
decalage_fleches[u"Allemagne"] = [10,+10]
decalage_fleches[u"France"] = [-10,+12]
decalage_fleches[u"Grèce"] = [-0,+15]
decalage_fleches[u"Lituanie"] = [-20,+10]
decalage_fleches[u"Portugal"] = [-17,+10]
decalage_fleches[u"Roumanie"] = [-20,-10]
decalage_fleches[u"Royaume-Uni"] = [-21,+15]
decalage_fleches[u"Espagne"] = [10,10]
for pays in bcg:
if pays in decalage_fleches:
_incidence, min_couv, max_couv = bcg[pays]
decX, decY = decalage_fleches[pays]
normeDec = math.sqrt( decX*decX + decY*decY )
plt.annotate( pays, color='gray', xy=(min_couv + decX * 2/normeDec, _incidence + decY * 2/normeDec),
xytext=(min_couv + decX, _incidence + decY ),
arrowprops=dict(color='gray',shrink=0.01, width=0.8, headwidth = 8, frac=3/normeDec ) )
# plot the points
plt.scatter( min_couverture,incidence, c='b', s=30, marker='<' )
plt.scatter( max_couverture, incidence, c='b', s=30, marker='>' )
plt.hlines( incidence, min_couverture, max_couverture, colors='b' )
# adjust the axes
plt.xlim(-10, 100)
plt.ylim(0, plt.ylim()[1])
fig.legende_sources( plt, sources, 0.08, 0.95 )
plt.show()
fig.sauvegarde_figure("BCG_Europe")
| mit |
waterponey/scikit-learn | setup.py | 3 | 10406 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
import traceback
import subprocess
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
SCIPY_MIN_VERSION = '0.9'
NUMPY_MIN_VERSION = '1.6.1'
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
extras_require={
'alldeps': (
'numpy >= {0}'.format(NUMPY_MIN_VERSION),
'scipy >= {0}'.format(SCIPY_MIN_VERSION),
),
},
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
print('Will remove generated .c files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(SCIPY_MIN_VERSION)
scipy_status['version'] = scipy_version
except ImportError:
traceback.print_exc()
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(NUMPY_MIN_VERSION)
numpy_status['version'] = numpy_version
except ImportError:
traceback.print_exc()
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
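# Illustrative sketch (not part of the original build script): how the two
# status helpers above are typically consumed. The printed message is
# hypothetical and depends on the local environment.
#
#   status = get_numpy_status()
#   if not status['up_to_date']:
#       print("NumPy missing or outdated:", status['version'] or "not installed")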
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
# For these actions, NumPy is not required
#
        # These actions must succeed without NumPy, for example when
        # pip is used to install scikit-learn while NumPy is not yet
        # present on the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
NUMPY_MIN_VERSION)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
SCIPY_MIN_VERSION)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
danithaca/mxnet | example/speech_recognition/stt_utils.py | 11 | 5031 | import logging
import os
import os.path
import numpy as np
import soundfile
from numpy.lib.stride_tricks import as_strided
logger = logging.getLogger(__name__)
def calc_feat_dim(window, max_freq):
return int(0.001 * window * max_freq) + 1
def conv_output_length(input_length, filter_size, border_mode, stride,
dilation=1):
""" Compute the length of the output sequence after 1D convolution along
time. Note that this function is in line with the function used in
Convolution1D class from Keras.
Params:
input_length (int): Length of the input sequence.
filter_size (int): Width of the convolution kernel.
        border_mode (str): Only `same` or `valid` are supported.
stride (int): Stride size used in 1D convolution.
dilation (int)
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
return (output_length + stride - 1) // stride
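# Worked example (illustrative numbers, not taken from the module): with
# input_length=100, filter_size=11, stride=2 and dilation=1 the helper above
# gives
#   conv_output_length(100, 11, 'same', 2)   -> (100 + 2 - 1) // 2 == 50
#   conv_output_length(100, 11, 'valid', 2)  -> (90 + 2 - 1) // 2  == 45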
def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128):
"""
Compute the spectrogram for a real signal.
The parameters follow the naming convention of
matplotlib.mlab.specgram
Args:
samples (1D array): input audio signal
fft_length (int): number of elements in fft window
sample_rate (scalar): sample rate
hop_length (int): hop length (relative offset between neighboring
fft windows).
Returns:
x (2D array): spectrogram [frequency x time]
freq (1D array): frequency of each row in x
Note:
This is a truncating computation e.g. if fft_length=10,
hop_length=5 and the signal has 23 elements, then the
last 3 elements will be truncated.
"""
assert not np.iscomplexobj(samples), "Must not pass in complex numbers"
window = np.hanning(fft_length)[:, None]
window_norm = np.sum(window ** 2)
# The scaling below follows the convention of
# matplotlib.mlab.specgram which is the same as
# matlabs specgram.
scale = window_norm * sample_rate
trunc = (len(samples) - fft_length) % hop_length
x = samples[:len(samples) - trunc]
# "stride trick" reshape to include overlap
nshape = (fft_length, (len(x) - fft_length) // hop_length + 1)
nstrides = (x.strides[0], x.strides[0] * hop_length)
x = as_strided(x, shape=nshape, strides=nstrides)
# window stride sanity check
assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)])
# broadcast window, compute fft over columns and square mod
# This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT).
x = np.fft.rfft(x * window, axis=0)
x = np.absolute(x) ** 2
# scale, 2.0 for everything except dc and fft_length/2
x[1:-1, :] *= (2.0 / scale)
x[(0, -1), :] /= scale
freqs = float(sample_rate) / fft_length * np.arange(x.shape[0])
return x, freqs
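# Minimal usage sketch (synthetic 440 Hz tone; assumed values, only meant to
# illustrate the shapes returned by spectrogram()):
#
#   samples = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000.0)
#   x, freqs = spectrogram(samples, fft_length=256, sample_rate=16000,
#                          hop_length=128)
#   # x has fft_length // 2 + 1 == 129 frequency rows; freqs[-1] == 8000.0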
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
eps=1e-14, overwrite=False):
""" Calculate the log of linear spectrogram from FFT energy
Params:
filename (str): Path to the audio file
step (int): Step size in milliseconds between windows
window (int): FFT window size in milliseconds
max_freq (int): Only FFT bins corresponding to frequencies between
[0, max_freq] are returned
eps (float): Small value to ensure numerical stability (for ln(x))
"""
csvfilename = filename.replace(".wav", ".csv")
if (os.path.isfile(csvfilename) is False) or overwrite:
with soundfile.SoundFile(filename) as sound_file:
audio = sound_file.read(dtype='float32')
sample_rate = sound_file.samplerate
if audio.ndim >= 2:
audio = np.mean(audio, 1)
if max_freq is None:
max_freq = sample_rate / 2
if max_freq > sample_rate / 2:
raise ValueError("max_freq must not be greater than half of "
" sample rate")
if step > window:
raise ValueError("step size must not be greater than window size")
hop_length = int(0.001 * step * sample_rate)
fft_length = int(0.001 * window * sample_rate)
pxx, freqs = spectrogram(
audio, fft_length=fft_length, sample_rate=sample_rate,
hop_length=hop_length)
ind = np.where(freqs <= max_freq)[0][-1] + 1
res = np.transpose(np.log(pxx[:ind, :] + eps))
np.savetxt(csvfilename, res)
return res
else:
return np.loadtxt(csvfilename)
| apache-2.0 |
kazemakase/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
seap-udea/tQuakes | plots/papers/time-evolution.py | 1 | 2863 | # ############################################################
# IMPORT TOOLS
# ############################################################
from tquakes import *
from matplotlib import use
use('Agg')
import matplotlib.pyplot as plt
confile=prepareScript()
conf=execfile(confile)
# ############################################################
# CONNECT TO DATABASE
# ############################################################
connection=connectDatabase()
db=connection.cursor()
# ############################################################
# PREPARE PLOTTING REGION
# ############################################################
fig,axs=subPlots(plt,[1],b=0.15,dh=0.02)
ax=axs[0]
# ############################################################
# GET PHASES
# ############################################################
latb=center[0]-dlat/2;latu=center[0]+dlat/2
lonl=center[1]-dlon/2;lonr=center[1]+dlon/2
# ############################################################
# GET PHASES
# ############################################################
latb=center[0]-dlat/2;latu=center[0]+dlat/2
lonl=center[1]-dlon/2;lonr=center[1]+dlon/2
search="""where
qdeptherr/1<qdepth/1 and
Ml+0>=%.2f AND Ml+0<%.2f and
qdepth+0>=%.2f and qdepth+0<%.2f and
qlat+0>=%.2f and qlat+0<%.2f and
qlon+0>=%.2f and qlon+0<%.2f and
(cluster1='0' or cluster1 like '-%%')
limit %d"""%(Mlmin,Mlmax,
depthmin,depthmax,
latb,latu,
lonl,lonr,
limit)
qids,quakes=getQuakes(search,db)
nquakes=len(qids)
times=quakes[:,QJD]
print "Number of earthquakes:",nquakes
# ############################################################
# PLOTS
# ############################################################
scatter=0.1
ax.plot(times,
quakes[:,ML]+scatter*(2*numpy.random.random(nquakes)-1),
'ko',markersize=1,zorder=100)
# ############################################################
# DECORATION
# ############################################################
tmin=min(times)
tmax=max(times)
xts=numpy.linspace(tmin,tmax,20)
ax.set_xticks(xts)
xtl=[]
for xt in xts:
date=jd2gcal(int(xt),0)
xtl+=["%d-%d-%d"%(date[0],date[1],date[2])]
ax.set_xticklabels(xtl,rotation=35,
fontsize=10,horizontalalignment='right')
ax.set_xlim((tmin,tmax))
ax.grid(color='gray',ls='solid',zorder=-100)
ax.set_ylabel("Local magnitude, $M_l$",fontsize=14)
xt=gcal2jd(2008,3,21)
ax.axvline(xt[0]+xt[1],color='blue',lw=2)
ax.text(0.25,0.8,"A",fontsize=20,color='k',transform=ax.transAxes)
ax.text(0.8,0.8,"B",fontsize=20,color='k',transform=ax.transAxes)
# ############################################################
# SAVING FIGURE
# ############################################################
saveFigure(confile,fig)
| gpl-2.0 |
AndersenLab/cegwas-web | base/models.py | 2 | 20988 | import os
import arrow
import json
import pandas as pd
import numpy as np
import datetime
import requests
from io import StringIO
from flask import Markup, url_for
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import or_, func
from logzero import logger
from base.constants import URLS
from base.utils.gcloud import get_item, store_item, query_item, get_cendr_bucket, check_blob
from base.utils.aws import get_aws_client
from gcloud.datastore.entity import Entity
from collections import defaultdict
from botocore.exceptions import ClientError
from base.config import DATASET_RELEASE
db = SQLAlchemy()
class datastore_model(object):
"""
Base datastore model
Google datastore is used to store dynamic information
such as users and reports.
    Note that the 'kind' must be defined within subclasses.
"""
def __init__(self, name_or_obj=None):
"""
Args:
name_or_obj - A name for a new datastore item
or an existing one to initialize
using the datastore_model class.
"""
self.exclude_from_indexes = None
self._exists = False
if type(name_or_obj) == Entity:
# Parse JSON fields when instantiating without
# loading from gcloud.
result_out = {}
for k, v in name_or_obj.items():
if isinstance(v, str) and v.startswith("JSON:"):
result_out[k] = json.loads(v[5:])
elif v:
result_out[k] = v
self.__dict__.update(result_out)
self.kind = name_or_obj.key.kind
self.name = name_or_obj.key.name
elif name_or_obj:
self.name = name_or_obj
item = get_item(self.kind, name_or_obj)
if item:
self._exists = True
self.__dict__.update(item)
def save(self):
self._exists = True
item_data = {k: v for k, v in self.__dict__.items() if k not in ['kind', 'name'] and not k.startswith("_")}
store_item(self.kind, self.name, **item_data)
def to_dict(self):
return {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
def __repr__(self):
if hasattr(self, 'name'):
return f"<{self.kind}:{self.name}>"
else:
return f"<{self.kind}:no-name>"
class trait_ds(datastore_model):
"""
Trait class corresponds to a trait analysis within a report.
This class contains methods for submitting jobs and fetching results
for an analysis.
If a task is re-run the report will only display the latest version.
"""
kind = 'trait'
def __init__(self, *args, **kwargs):
"""
The trait_ds object adopts the task
ID assigned by AWS Fargate.
"""
self._ecs = get_aws_client('ecs')
# Get task status
self._logs = get_aws_client('logs')
super(trait_ds, self).__init__(*args, **kwargs)
self.exclude_from_indexes = ['trait_data', 'error_traceback', 'CEGWAS_VERSION', 'task_info']
# Read trait data in upon initialization.
if hasattr(self, 'trait_data'):
self._trait_df = pd.read_csv(StringIO(self.trait_data), sep='\t')
def version_link(self):
"""
Returns the data version link.
"""
release_link = url_for('data.data', selected_release=self.DATASET_RELEASE)
return Markup(f"<a href='{release_link}'>{self.DATASET_RELEASE}</a>")
def run_task(self):
"""
Runs the task
"""
# Fargate credentials
task_fargate = self._ecs.run_task(
taskDefinition=f"cendr-map-{DATASET_RELEASE}",
overrides={
'containerOverrides': [
{
'name': 'cegwas',
'command': [
'python3',
'run.py'
],
'environment': [
{
'name': 'GOOGLE_APPLICATION_CREDENTIALS',
'value': 'gcloud_fargate.json'
},
{
'name': 'REPORT_NAME',
'value': self.report_name
},
{
'name': 'TRAIT_NAME',
'value': self.trait_name
},
{
'name': 'DATASET_RELEASE',
'value': DATASET_RELEASE
}
],
}
],
},
count=1,
launchType='FARGATE',
networkConfiguration={
'awsvpcConfiguration': {
'subnets': [
'subnet-77581612',
],
'securityGroups': [
'sg-75e7860a'
],
'assignPublicIp': 'ENABLED'
}
})
task_fargate = task_fargate['tasks'][0]
# Generate trait_ds model
self.report_trait = "{}:{}".format(self.report_name, self.trait_name)
self.name = task_fargate['taskArn'].split("/")[1]
self.task_info = task_fargate
self.created_on = arrow.utcnow().datetime
self.save()
# Return the task ID
return self.name
def container_status(self):
"""
Fetch the status of the task
"""
if self.status == 'complete':
return 'complete'
try:
task_status = self._ecs.describe_tasks(tasks=[self.name])['tasks'][0]['lastStatus']
return task_status
except (IndexError, ClientError):
return 'STOPPED'
@property
def is_complete(self):
return self.status == "complete"
@property
def cegwas_version_formatted(self):
try:
git_hash = self.CEGWAS_VERSION[3].replace("(Andersenlab/cegwas@", "").replace(")", "")
return f"v{self.CEGWAS_VERSION[0]} @{git_hash} [{self.CEGWAS_VERSION[1]}]"
except:
return ""
@property
def docker_image_version(self):
try:
return json.loads(self.task_info[5:])['containers'][0]['containerArn'].split("/")[1]
except:
return ""
def get_formatted_task_log(self):
"""
Returns formatted task log
"""
try:
log = requests.get(self.gs_base_url + "/out.log").content
except:
return [f"####-##-## ##:##:## Task ID: {self.name}\n"]
return (f"####-##-## ##:##:## Task ID: {self.name}\n" + log.decode('utf-8')).splitlines()
def duration(self):
"""
Calculate how long the run took
"""
if hasattr(self, 'completed_on') and hasattr(self, 'started_on'):
diff = (self.completed_on - self.started_on)
minutes, seconds = divmod(diff.seconds, 60)
return "{:0>2d}m {:0>2d}s".format(minutes, seconds)
else:
return None
@property
def gs_path(self):
if self.REPORT_VERSION == 'v2':
return f"{self.REPORT_VERSION}/{self.name}"
elif self.REPORT_VERSION == 'v1':
return f"{self.REPORT_VERSION}/{self.report_slug}/{self.trait_name}"
@property
def gs_base_url(self):
"""
Returns the google storage base URL
The URL schema changed from REPORT_VERSION v1 to v2.
"""
if self.REPORT_VERSION == 'v2':
return f"https://storage.googleapis.com/elegansvariation.org/reports/{self.gs_path}"
elif self.REPORT_VERSION == 'v1':
return f"https://storage.googleapis.com/elegansvariation.org/reports/{self.gs_path}"
def get_gs_as_dataset(self, fname):
"""
Downloads a dataset stored as a TSV
from the folder associated with the trait
on google storage and return it as a
pandas dataframe.
"""
return pd.read_csv(f"{self.gs_base_url}/{fname}", sep="\t")
def get_gs_as_json(self, fname):
"""
Downloads a google-storage file as json
"""
return requests.get(f"{self.gs_base_url}/{fname}").json()
def list_report_files(self):
"""
Lists files with a given prefix
from the current dataset release
"""
cendr_bucket = get_cendr_bucket()
items = cendr_bucket.list_blobs(prefix=f"reports/{self.gs_path}")
return {os.path.basename(x.name): f"https://storage.googleapis.com/elegansvariation.org/{x.name}" for x in items}
def file_url(self, fname):
"""
Return the figure URL. May change with updates
to report versions.
"""
gs_url = f"{self.gs_base_url}/{fname}"
return f"{gs_url}"
class mapping_ds(datastore_model):
"""
The mapping/peak interval model
"""
kind = 'mapping'
def __init__(self, *args, **kwargs):
super(mapping_ds, self).__init__(*args, **kwargs)
class user_ds(datastore_model):
"""
The User model - for creating and retrieving
information on users.
"""
kind = 'user'
def __init__(self, *args, **kwargs):
super(user_ds, self).__init__(*args, **kwargs)
def reports(self):
filters = [('user_id', '=', self.user_id)]
# Note this requires a composite index defined very precisely.
results = query_item('trait', filters=filters, order=['user_id', '-created_on'])
results = sorted(results, key=lambda x: x['created_on'], reverse=True)
results_out = defaultdict(list)
for row in results:
results_out[row['report_slug']].append(row)
# Generate report objects
return results_out
class DictSerializable(object):
def _asdict(self):
result = {}
for key in self.__mapper__.c.keys():
result[key] = getattr(self, key)
return result
# --------- Break datastore here ---------#
class Metadata(DictSerializable, db.Model):
"""
Table for storing information about other tables
"""
__tablename__ = "metadata"
key = db.Column(db.String(50), index=True, primary_key=True)
value = db.Column(db.String)
class Strain(DictSerializable, db.Model):
__tablename__ = "strain"
species_id_method = db.Column(db.String(50), nullable=True)
species = db.Column(db.String(50), index=True)
isotype_ref_strain = db.Column(db.Boolean(), index=True)
strain = db.Column(db.String(25), primary_key=True)
isotype = db.Column(db.String(25), index=True, nullable=True)
previous_names = db.Column(db.String(100), nullable=True)
sequenced = db.Column(db.Boolean(), index=True, nullable=True) # Is whole genome sequenced [WGS_seq]
release = db.Column(db.Integer(), nullable=False, index=True)
source_lab = db.Column(db.String(), nullable=True)
latitude = db.Column(db.Float(), nullable=True)
longitude = db.Column(db.Float(), nullable=True)
landscape = db.Column(db.String(), nullable=True)
locality_description = db.Column(db.String(), nullable=True)
substrate = db.Column(db.String(), nullable=True)
substrate_comments = db.Column(db.String(), nullable=True)
substrate_temp = db.Column(db.Float())
ambient_temp = db.Column(db.Float())
ambient_humidity = db.Column(db.Float())
associated_organism = db.Column(db.String(), nullable=True)
inbreeding_status = db.Column(db.String(), nullable=True)
sampled_by = db.Column(db.String(), nullable=True)
isolated_by = db.Column(db.String(), nullable=True)
sampling_date = db.Column(db.Date(), nullable=True)
sampling_date_comment = db.Column(db.String(), nullable=True)
notes = db.Column(db.String(), nullable=True)
strain_set = db.Column(db.String(), nullable=True)
issues = db.Column(db.Boolean(), nullable=True)
issue_notes = db.Column(db.String(), nullable=True)
# Elevation is added in and computed separately
elevation = db.Column(db.Float(), nullable=True)
def __repr__(self):
return self.strain
def to_json(self):
return {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
def strain_photo_url(self):
# Checks if photo exists and returns URL if it does
try:
return check_blob(f"photos/isolation/{self.strain}.jpg").public_url
except AttributeError:
return None
def strain_bam_url(self):
"""
Return bam / bam_index url set
"""
url_set = Markup(f"""
<a href="{URLS.BAM_URL_PREFIX}/strain/{self.strain}.bam">
BAM
</a>
/
<a href="{URLS.BAM_URL_PREFIX}/strain/{self.strain}.bam.bai">
bai
</a>
""".strip())
return url_set
@classmethod
def strain_sets(cls):
df = pd.read_sql_table(cls.__tablename__, db.engine)
result = df[['strain', 'isotype', 'strain_set']].dropna(how='any') \
.groupby('strain_set') \
.agg(list) \
.to_dict()
return result['strain']
def isotype_bam_url(self):
"""
Return bam / bam_index url set
"""
url_set = Markup(f"""
<a href="{URLS.BAM_URL_PREFIX}/{self.isotype}.bam">
BAM
</a>
/
<a href="{URLS.BAM_URL_PREFIX}/{self.isotype}.bam.bai">
bai
</a>
""".strip())
return url_set
@classmethod
def cum_sum_strain_isotype(cls):
"""
        Build a cumulative time series of strain and isotype counts by
        sampling date (used to plot collections over time).
"""
df = pd.read_sql_table(cls.__tablename__, db.engine)
# Remove strains with issues
df = df[df["issues"] == False]
cumulative_isotype = df[['isotype', 'sampling_date']].sort_values(['sampling_date'], axis=0) \
.drop_duplicates(['isotype']) \
.groupby(['sampling_date'], as_index=True) \
.count() \
.cumsum() \
.reset_index()
cumulative_isotype = cumulative_isotype.append({'sampling_date': np.datetime64(datetime.datetime.today().strftime("%Y-%m-%d")),
'isotype': len(df['isotype'].unique())}, ignore_index=True)
cumulative_strain = df[['strain', 'sampling_date']].sort_values(['sampling_date'], axis=0) \
.drop_duplicates(['strain']) \
.dropna(how='any') \
.groupby(['sampling_date']) \
.count() \
.cumsum() \
.reset_index()
cumulative_strain = cumulative_strain.append({'sampling_date': np.datetime64(datetime.datetime.today().strftime("%Y-%m-%d")),
'strain': len(df['strain'].unique())}, ignore_index=True)
df = cumulative_isotype.set_index('sampling_date') \
.join(cumulative_strain.set_index('sampling_date')) \
.reset_index()
return df
@classmethod
def release_summary(cls, release):
"""
Returns isotype and strain count for a data release.
Args:
release - the data release
"""
counts = {'strain_count': cls.query.filter((cls.release <= release) & (cls.issues == False)).count(),
'strain_count_sequenced': cls.query.filter((cls.release <= release) & (cls.issues == False) & (cls.sequenced == True)).count(),
'isotype_count': cls.query.filter((cls.release <= release) & (cls.issues == False) & (cls.isotype != None)).group_by(cls.isotype).count()}
return counts
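    # Illustrative call (the release id below is hypothetical):
    #   Strain.release_summary(20180527)
    #   # -> {'strain_count': ..., 'strain_count_sequenced': ..., 'isotype_count': ...}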
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class WormbaseGene(DictSerializable, db.Model):
__tablename__ = 'wormbase_gene'
id = db.Column(db.Integer, primary_key=True)
chrom = db.Column(db.String(20), index=True)
chrom_num = db.Column(db.Integer(), index=True) # For sorting purposes
start = db.Column(db.Integer(), index=True)
end = db.Column(db.Integer(), index=True)
feature = db.Column(db.String(30), index=True)
strand = db.Column(db.String(1))
frame = db.Column(db.Integer(), nullable=True)
gene_id = db.Column(db.ForeignKey('wormbase_gene_summary.gene_id'), nullable=False)
gene_biotype = db.Column(db.String(30), nullable=True)
locus = db.Column(db.String(30), index=True)
transcript_id = db.Column(db.String(30), nullable=True, index=True)
transcript_biotype = db.Column(db.String(), index=True)
exon_id = db.Column(db.String(30), nullable=True, index=True)
exon_number = db.Column(db.Integer(), nullable=True)
protein_id = db.Column(db.String(30), nullable=True, index=True)
arm_or_center = db.Column(db.String(12), index=True)
gene_summary = db.relationship("WormbaseGeneSummary", backref='gene_components')
def __repr__(self):
return f"{self.gene_id}:{self.feature} [{self.seqname}:{self.start}-{self.end}]"
class WormbaseGeneSummary(DictSerializable, db.Model):
"""
This is a condensed version of the WormbaseGene model;
    it is constructed for convenience and only defines the genes
(not exons/introns/etc.)
"""
__tablename__ = "wormbase_gene_summary"
id = db.Column(db.Integer, primary_key=True)
chrom = db.Column(db.String(7), index=True)
chrom_num = db.Column(db.Integer(), index=True)
start = db.Column(db.Integer(), index=True)
end = db.Column(db.Integer(), index=True)
locus = db.Column(db.String(30), index=True)
gene_id = db.Column(db.String(25), index=True)
gene_id_type = db.Column(db.String(15), index=False)
sequence_name = db.Column(db.String(30), index=True)
biotype = db.Column(db.String(30), nullable=True)
gene_symbol = db.column_property(func.coalesce(locus, sequence_name, gene_id))
interval = db.column_property(func.printf("%s:%s-%s", chrom, start, end))
arm_or_center = db.Column(db.String(12), index=True)
@classmethod
def resolve_gene_id(cls, query):
"""
query - a locus name or transcript ID
output - a wormbase gene ID
Example:
WormbaseGene.resolve_gene_id('pot-2') --> WBGene00010195
"""
result = cls.query.filter(or_(cls.locus == query, cls.sequence_name == query)).first()
if result:
return result.gene_id
class Homologs(DictSerializable, db.Model):
"""
The homologs database combines
"""
__tablename__ = "homologs"
id = db.Column(db.Integer, primary_key=True)
gene_id = db.Column(db.ForeignKey('wormbase_gene_summary.gene_id'), nullable=False, index=True)
gene_name = db.Column(db.String(40), index=True)
homolog_species = db.Column(db.String(50), index=True)
homolog_taxon_id = db.Column(db.Integer, index=True, nullable=True) # If available
homolog_gene = db.Column(db.String(50), index=True)
homolog_source = db.Column(db.String(40))
gene_summary = db.relationship("WormbaseGeneSummary", backref='homologs', lazy='joined')
def unnest(self):
"""
Used with the gene API - returns
an unnested homolog datastructure combined with the wormbase gene summary model.
"""
self.__dict__.update(self.gene_summary.__dict__)
self.__dict__['gene_summary'] = None
return self
def __repr__(self):
return f"homolog: {self.gene_name} -- {self.homolog_gene}"
| mit |
bnaul/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 3 | 18777 | import numpy as np
import pytest
from scipy import linalg
from sklearn.utils import check_random_state
from sklearn.utils._testing import assert_array_equal, assert_no_warnings
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import ignore_warnings
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct
# values for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_allclose(np.exp(y_log_proba_pred1), y_proba_pred1,
rtol=1e-6, atol=1e-6, err_msg='solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert np.any(y_pred3 != y3), 'solver %s' % solver
# Test invalid shrinkages
clf = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = LinearDiscriminantAnalysis(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = LinearDiscriminantAnalysis(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
@pytest.mark.parametrize("n_classes", [2, 3])
@pytest.mark.parametrize("solver", ["svd", "lsqr", "eigen"])
def test_lda_predict_proba(solver, n_classes):
def generate_dataset(n_samples, centers, covariances, random_state=None):
"""Generate a multivariate normal data given some centers and
covariances"""
rng = check_random_state(random_state)
X = np.vstack([rng.multivariate_normal(mean, cov,
size=n_samples // len(centers))
for mean, cov in zip(centers, covariances)])
y = np.hstack([[clazz] * (n_samples // len(centers))
for clazz in range(len(centers))])
return X, y
blob_centers = np.array([[0, 0], [-10, 40], [-30, 30]])[:n_classes]
blob_stds = np.array([[[10, 10], [10, 100]]] * len(blob_centers))
X, y = generate_dataset(
n_samples=90000, centers=blob_centers, covariances=blob_stds,
random_state=42
)
lda = LinearDiscriminantAnalysis(solver=solver, store_covariance=True,
shrinkage=None).fit(X, y)
# check that the empirical means and covariances are close enough to the
# one used to generate the data
assert_allclose(lda.means_, blob_centers, atol=1e-1)
assert_allclose(lda.covariance_, blob_stds[0], atol=1)
# implement the method to compute the probability given in The Elements
# of Statistical Learning (cf. p.127, Sect. 4.4.5 "Logistic Regression
# or LDA?")
precision = linalg.inv(blob_stds[0])
alpha_k = []
alpha_k_0 = []
for clazz in range(len(blob_centers) - 1):
alpha_k.append(
np.dot(precision,
(blob_centers[clazz] - blob_centers[-1])[:, np.newaxis]))
alpha_k_0.append(
np.dot(- 0.5 * (blob_centers[clazz] +
blob_centers[-1])[np.newaxis, :], alpha_k[-1]))
sample = np.array([[-22, 22]])
def discriminant_func(sample, coef, intercept, clazz):
return np.exp(intercept[clazz] + np.dot(sample, coef[clazz]))
prob = np.array([float(
discriminant_func(sample, alpha_k, alpha_k_0, clazz) /
(1 + sum([discriminant_func(sample, alpha_k, alpha_k_0, clazz)
for clazz in range(n_classes - 1)]))) for clazz in range(
n_classes - 1)])
prob_ref = 1 - np.sum(prob)
# check the consistency of the computed probability
# all probabilities should sum to one
prob_ref_2 = float(
1 / (1 + sum([discriminant_func(sample, alpha_k, alpha_k_0, clazz)
for clazz in range(n_classes - 1)]))
)
assert prob_ref == pytest.approx(prob_ref_2)
    # check that the LDA probabilities are close to the theoretical
    # probabilities
assert_allclose(lda.predict_proba(sample),
np.hstack([prob, prob_ref])[np.newaxis],
atol=1e-2)
def test_lda_priors():
# Test priors (negative priors)
priors = np.array([0.5, -0.5])
clf = LinearDiscriminantAnalysis(priors=priors)
msg = "priors must be non-negative"
assert_raise_message(ValueError, msg, clf.fit, X, y)
    # Test that priors passed as a list are correctly handled (run to check
    # that no failure is raised)
clf = LinearDiscriminantAnalysis(priors=[0.5, 0.5])
clf.fit(X, y)
# Test that priors always sum to 1
priors = np.array([0.5, 0.6])
prior_norm = np.array([0.45, 0.55])
clf = LinearDiscriminantAnalysis(priors=priors)
assert_warns(UserWarning, clf.fit, X, y)
assert_array_almost_equal(clf.priors_, prior_norm, 2)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_lsqr = LinearDiscriminantAnalysis(solver="lsqr")
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = LinearDiscriminantAnalysis(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert X_transformed.shape[1] == 1
clf = LinearDiscriminantAnalysis(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert X_transformed.shape[1] == 1
clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_explained_variance_ratio():
# Test if the sum of the normalized eigen vectors values equals 1,
# Also tests whether the explained_variance_ratio_ formed by the
# eigen solver is the same as the explained_variance_ratio_ formed
# by the svd solver
state = np.random.RandomState(0)
X = state.normal(loc=0, scale=100, size=(40, 20))
y = state.randint(0, 3, size=(40,))
clf_lda_eigen = LinearDiscriminantAnalysis(solver="eigen")
clf_lda_eigen.fit(X, y)
assert_almost_equal(clf_lda_eigen.explained_variance_ratio_.sum(), 1.0, 3)
assert clf_lda_eigen.explained_variance_ratio_.shape == (2,), (
"Unexpected length for explained_variance_ratio_")
clf_lda_svd = LinearDiscriminantAnalysis(solver="svd")
clf_lda_svd.fit(X, y)
assert_almost_equal(clf_lda_svd.explained_variance_ratio_.sum(), 1.0, 3)
assert clf_lda_svd.explained_variance_ratio_.shape == (2,), (
"Unexpected length for explained_variance_ratio_")
assert_array_almost_equal(clf_lda_svd.explained_variance_ratio_,
clf_lda_eigen.explained_variance_ratio_)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver)
# should be able to separate the data perfectly
assert clf.fit(x, y).score(x, y) == 1.0, (
'using covariance: %s' % solver)
def test_lda_store_covariance():
# Test for solver 'lsqr' and 'eigen'
# 'store_covariance' has no effect on 'lsqr' and 'eigen' solvers
for solver in ('lsqr', 'eigen'):
clf = LinearDiscriminantAnalysis(solver=solver).fit(X6, y6)
assert hasattr(clf, 'covariance_')
# Test the actual attribute:
clf = LinearDiscriminantAnalysis(solver=solver,
store_covariance=True).fit(X6, y6)
assert hasattr(clf, 'covariance_')
assert_array_almost_equal(
clf.covariance_,
np.array([[0.422222, 0.088889], [0.088889, 0.533333]])
)
    # Test for SVD solver; the default is to not set the covariance_ attribute
clf = LinearDiscriminantAnalysis(solver='svd').fit(X6, y6)
assert not hasattr(clf, 'covariance_')
# Test the actual attribute:
    clf = LinearDiscriminantAnalysis(solver='svd',
store_covariance=True).fit(X6, y6)
assert hasattr(clf, 'covariance_')
assert_array_almost_equal(
clf.covariance_,
np.array([[0.422222, 0.088889], [0.088889, 0.533333]])
)
@pytest.mark.parametrize('n_features', [3, 5])
@pytest.mark.parametrize('n_classes', [5, 3])
def test_lda_dimension_warning(n_classes, n_features):
rng = check_random_state(0)
n_samples = 10
X = rng.randn(n_samples, n_features)
# we create n_classes labels by repeating and truncating a
# range(n_classes) until n_samples
y = np.tile(range(n_classes), n_samples // n_classes + 1)[:n_samples]
max_components = min(n_features, n_classes - 1)
for n_components in [max_components - 1, None, max_components]:
# if n_components <= min(n_classes - 1, n_features), no warning
lda = LinearDiscriminantAnalysis(n_components=n_components)
assert_no_warnings(lda.fit, X, y)
for n_components in [max_components + 1,
max(n_features, n_classes - 1) + 1]:
# if n_components > min(n_classes - 1, n_features), raise error.
# We test one unit higher than max_components, and then something
# larger than both n_features and n_classes - 1 to ensure the test
        # works for any value of n_components
lda = LinearDiscriminantAnalysis(n_components=n_components)
msg = "n_components cannot be larger than "
with pytest.raises(ValueError, match=msg):
lda.fit(X, y)
@pytest.mark.parametrize("data_type, expected_type", [
(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)
])
def test_lda_dtype_match(data_type, expected_type):
for (solver, shrinkage) in solver_shrinkage:
clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
clf.fit(X.astype(data_type), y.astype(data_type))
assert clf.coef_.dtype == expected_type
def test_lda_numeric_consistency_float32_float64():
for (solver, shrinkage) in solver_shrinkage:
clf_32 = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
clf_32.fit(X.astype(np.float32), y.astype(np.float32))
clf_64 = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
clf_64.fit(X.astype(np.float64), y.astype(np.float64))
# Check value consistency between types
rtol = 1e-6
assert_allclose(clf_32.coef_, clf_64.coef_, rtol=rtol)
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
assert_array_equal(y_pred, y6)
# Assure that it works with 1D data
y_pred1 = clf.fit(X7, y6).predict(X7)
assert_array_equal(y_pred1, y6)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X7)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y6)
y_log_proba_pred1 = clf.predict_log_proba(X7)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X6, y7).predict(X6)
# QDA shouldn't be able to separate those
assert np.any(y_pred3 != y7)
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X6, y4)
def test_qda_priors():
clf = QuadraticDiscriminantAnalysis()
y_pred = clf.fit(X6, y6).predict(X6)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = QuadraticDiscriminantAnalysis(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X6, y6).predict(X6)
n_pos2 = np.sum(y_pred == 2)
assert n_pos2 > n_pos
def test_qda_store_covariance():
# The default is to not set the covariances_ attribute
clf = QuadraticDiscriminantAnalysis().fit(X6, y6)
assert not hasattr(clf, 'covariance_')
# Test the actual attribute:
clf = QuadraticDiscriminantAnalysis(store_covariance=True).fit(X6, y6)
assert hasattr(clf, 'covariance_')
assert_array_almost_equal(
clf.covariance_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariance_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = QuadraticDiscriminantAnalysis()
with ignore_warnings():
y_pred = clf.fit(X2, y6).predict(X2)
assert np.any(y_pred != y6)
# adding a little regularization fixes the problem
clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y6)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y6)
# Case n_samples_in_a_class < n_features
clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = _cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = _cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
@pytest.mark.parametrize("solver", ['svd, lsqr', 'eigen'])
def test_raises_value_error_on_same_number_of_classes_and_samples(solver):
"""
Tests that if the number of samples equals the number
of classes, a ValueError is raised.
"""
X = np.array([[0.5, 0.6], [0.6, 0.5]])
y = np.array(["a", "b"])
clf = LinearDiscriminantAnalysis(solver=solver)
with pytest.raises(ValueError, match="The number of samples must be more"):
clf.fit(X, y)
| bsd-3-clause |
Unidata/siphon | examples/ndbc/latest_request.py | 1 | 1478 | # Copyright (c) 2018 Siphon Contributors.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
NDBC Latest Data Request
========================
This example shows how to use siphon's `simplewebservice` support to query the most recent
observations from all of the NDBC buoys at once.
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
from siphon.simplewebservice.ndbc import NDBC
####################################################
# Get a pandas data frame of all of the observations
df = NDBC.latest_observations()
df.head()
####################################################
# In this case I'm going to drop buoys that do not have water temperature measurements.
df.dropna(subset=['water_temperature'], inplace=True)
####################################################
# Let's make a simple plot of the buoy positions and color by water temperature
proj = ccrs.LambertConformal(central_latitude=45., central_longitude=-100.,
standard_parallels=[30, 60])
fig = plt.figure(figsize=(17., 11.))
ax = plt.axes(projection=proj)
ax.coastlines('50m', edgecolor='black')
ax.add_feature(cfeature.OCEAN.with_scale('50m'))
ax.add_feature(cfeature.LAND.with_scale('50m'))
ax.set_extent([-85, -75, 25, 30], ccrs.PlateCarree())
ax.scatter(df['longitude'], df['latitude'], c=df['water_temperature'],
transform=ccrs.PlateCarree())
plt.show()
| bsd-3-clause |
lioritan/Thesis | small_datasets_maker.py | 1 | 2307 | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 03 18:31:46 2015
@author: liorf
"""
import cPickle
from random import sample
from numpy import *
from sklearn.datasets import fetch_20newsgroups
def make_subset(data, labels, cat_size, legal_cats=None):
'''
cat_size= desired size for each label
Note: this expects data only from labels given.
'''
# print len(data), len(labels)
new_data= []
new_labels= []
categories= frozenset(labels)
if legal_cats is not None:
categories= frozenset(legal_cats)
for cat in categories:
        inds= where(labels==cat)[0]  # indices of samples with this label (numpy equivalent of the old matplotlib find())
sub_inds= sample(inds, cat_size)
for ind in sub_inds:
new_data.append(data[ind])
new_labels.append(labels[ind])
return array(new_data, dtype=object), array(new_labels)
# pick cat_size indices at random for each category, then put them in the subset
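# Illustrative toy call (arrays below are hypothetical, not the thesis datasets):
#   data = array(['doc%d' % i for i in range(100)], dtype=object)
#   labels = array([i % 4 for i in range(100)])
#   sub_data, sub_labels = make_subset(data, labels, cat_size=10, legal_cats=[0, 2])
#   # sub_data holds 20 documents: 10 random picks from label 0 and 10 from label 2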
if __name__=='__main__':
pass
#do for OHSUMED, OHSUMED titles only, 20NG
#50 train for each cat+50 test -> 100xnum_cats
#ohsumed datasets: needs more things (need to first filter out the categories!)
# with open('./problems/ohsumed_dataset_parsed.pkl', 'rb') as fptr:
# ((data, labels), (_,_))= cPickle.load(fptr)
# data,labels= array(data), array(labels)
# (data, labels)= make_subset(data, labels, 100, [1,4,6,8,10,12,14,20,21,23])
# with open('./problems/ohsumed_small_subset.pkl','wb') as fptt:
# cPickle.dump((data,labels), fptt, -1)
# print 'one'
# with open('./problems/ohsumed_titles_parsed_complete.pkl', 'rb') as fptr:
# (data, labels)= cPickle.load(fptr)
# data,labels= array(data), array(labels)
# (data, labels)= make_subset(data, labels, 100, [1, 4, 6, 8, 10, 13, 14, 17, 20, 23])
# with open('./problems/ohsumed_titles_only_small_subset.pkl','wb') as fptt:
# cPickle.dump((data,labels), fptt, -1)
# print 'two'
# newsgroups = fetch_20newsgroups(subset='all', remove=('headers', 'footers', 'quotes'))
# fixed_data = array([s.lower().replace('\n','').split(' ') for s in newsgroups.data])
# (data, labels)= make_subset(fixed_data, newsgroups.target, 100)
# with open('./problems/20NG_small_subset.pkl', 'wb') as fptr:
# cPickle.dump((data, labels), fptr, -1)
# print 'three' | gpl-2.0 |
JT5D/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 3 | 6966 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012
# License: BSD 3 clause
from tempfile import mkdtemp
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.cluster import Ward, WardAgglomeration, ward_tree
from sklearn.cluster.hierarchical import _hc_cut
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.utils.testing import assert_warns
def test_structured_ward_tree():
"""
Check that we obtain the correct solution for structured ward tree.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
children, n_components, n_leaves, parent = ward_tree(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError, ward_tree, X.T, np.ones((4, 4)))
def test_unstructured_ward_tree():
"""
Check that we obtain the correct solution for unstructured ward tree.
"""
rnd = np.random.RandomState(0)
X = rnd.randn(50, 100)
for this_X in (X, X[0]):
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
children, n_nodes, n_leaves, parent = assert_warns(UserWarning,
ward_tree,
this_X.T,
n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_ward_tree():
"""
Check that the height of ward tree is sorted.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
children, n_nodes, n_leaves, parent = ward_tree(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_ward_clustering():
"""
Check that we obtain the correct number of clusters with Ward clustering.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(100, 50)
connectivity = grid_to_graph(*mask.shape)
clustering = Ward(n_clusters=10, connectivity=connectivity)
clustering.fit(X)
# test caching
clustering = Ward(n_clusters=10, connectivity=connectivity,
memory=mkdtemp())
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
# Turn caching off now
clustering = Ward(n_clusters=10, connectivity=connectivity)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
np.testing.assert_array_equal(clustering.labels_, labels)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = Ward(n_clusters=10,
connectivity=connectivity.todense())
assert_raises(TypeError, clustering.fit, X)
clustering = Ward(n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.todense()[:10, :10]))
assert_raises(ValueError, clustering.fit, X)
def test_ward_agglomeration():
"""
Check that we obtain the correct solution in a simplistic case
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
ward = WardAgglomeration(n_clusters=5, connectivity=connectivity)
ward.fit(X)
assert_true(np.size(np.unique(ward.labels_)) == 5)
Xred = ward.transform(X)
assert_true(Xred.shape[1] == 5)
Xfull = ward.inverse_transform(Xred)
assert_true(np.unique(Xfull[0]).size == 5)
assert_array_almost_equal(ward.transform(Xfull), Xred)
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
"""Test scikit ward with full connectivity (i.e. unstructured) vs scipy
"""
from scipy.sparse import lil_matrix
n, p, k = 10, 5, 3
rnd = np.random.RandomState(0)
connectivity = lil_matrix(np.ones((n, n)))
for i in range(5):
X = .1 * rnd.normal(size=(n, p))
X -= 4 * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.ward(X)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = ward_tree(X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
"""
Check that connectivity in the ward tree is propagated correctly during
merging.
"""
from sklearn.neighbors import NearestNeighbors
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144),
])
nn = NearestNeighbors(n_neighbors=10).fit(X)
connectivity = nn.kneighbors_graph(X)
ward = Ward(n_clusters=4, connectivity=connectivity)
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_connectivity_fixing_non_lil():
"""
    Check non-regression of a bug when a connectivity matrix that does not
    support item assignment is provided with more than one connected component.
"""
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = Ward(connectivity=c)
assert_warns(UserWarning, w.fit, x)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
RIOT-OS/RIOT | tests/pkg_cmsis-nn/generate_image.py | 15 | 1140 | #!/usr/bin/env python3
"""Generate a binary file from a sample image of the CIFAR-10 dataset.
Pixels of the sample are stored as uint8; images have size 32x32x3.
"""
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main(args):
_, (cifar10_test, _) = cifar10.load_data()
data = cifar10_test[args.index]
data = data.astype('uint8')
output_path = os.path.join(SCRIPT_DIR, args.output)
np.ndarray.tofile(data, output_path)
if args.no_plot is False:
plt.imshow(data)
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--index", type=int, default=0,
help="Image index in CIFAR test dataset")
parser.add_argument("-o", "--output", type=str, default='input',
help="Output filename")
parser.add_argument("--no-plot", default=False, action='store_true',
help="Disable image display in matplotlib")
main(parser.parse_args())
| lgpl-2.1 |
hitszxp/scikit-learn | sklearn/manifold/locally_linear.py | 15 | 24841 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = np.asarray(X)
Z = np.asarray(Z)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
if X.dtype.kind == 'i':
X = X.astype(np.float)
if Z.dtype.kind == 'i':
Z = Z.astype(np.float)
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
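# A minimal, illustrative sketch of how barycenter_weights could be called; the
# names _demo_barycenter_weights, X_demo and idx_demo are hypothetical, not part
# of the module's API. Each point uses all other points as its neighborhood and
# the returned weights should sum to one per row.
def _demo_barycenter_weights():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(6, 2)                                    # 6 points in 2-D
    idx_demo = np.array([[j for j in range(6) if j != i] for i in range(6)])
    W = barycenter_weights(X_demo, X_demo[idx_demo])            # shape (6, 5)
    return W.sum(axis=1)                                        # approximately all ones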
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
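# A hedged usage sketch of barycenter_kneighbors_graph on random data; the names
# _demo_barycenter_graph and X_demo are hypothetical. The result is a sparse
# (n_samples, n_samples) CSR matrix whose rows hold the reconstruction weights of
# each point's k nearest neighbors and sum (approximately) to 1.
def _demo_barycenter_graph():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(20, 3)
    W = barycenter_kneighbors_graph(X_demo, n_neighbors=5)
    row_sums = np.asarray(W.sum(axis=1)).ravel()   # approximately all ones
    return W.shape, row_sums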
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I so that M = (I - W)' (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
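# An illustrative, hedged call of locally_linear_embedding on a made-up noisy 1-D
# curve embedded in 3-D; _demo_locally_linear_embedding and X_demo are hypothetical
# names, and the 'dense' solver is chosen only to keep the sketch deterministic.
def _demo_locally_linear_embedding():
    rng = np.random.RandomState(42)
    t = np.sort(rng.uniform(0, 3 * np.pi, 200))
    X_demo = np.c_[np.cos(t), np.sin(t), 0.1 * rng.randn(200)]
    Y, err = locally_linear_embedding(X_demo, n_neighbors=10, n_components=2,
                                      eigen_solver='dense')
    return Y.shape, err   # (200, 2) and a small reconstruction error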
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
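# A hedged, end-to-end sketch of the LocallyLinearEmbedding estimator: fit on
# random training data, then map unseen points into the learned embedding space.
# The names _demo_lle_estimator and X_train are illustrative only.
def _demo_lle_estimator():
    rng = np.random.RandomState(0)
    X_train = rng.randn(100, 5)
    lle = LocallyLinearEmbedding(n_neighbors=12, n_components=2,
                                 eigen_solver='dense', random_state=0)
    X_embedded = lle.fit_transform(X_train)     # shape (100, 2)
    X_mapped = lle.transform(rng.randn(5, 5))   # unseen points, shape (5, 2)
    return X_embedded.shape, X_mapped.shape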
| bsd-3-clause |
paulcon/active_subspaces | setup.py | 1 | 1090 | from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(name='active_subspaces',
version='0.1',
description='Tools to apply active subspaces to analyze models and data.',
long_description=readme(),
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Mathematics'
],
keywords='dimension reduction mathematics active subspaces uncertainty quantification uq',
url='https://github.com/paulcon/active_subspaces',
author='Paul Constantine',
author_email='[email protected]',
license='MIT',
packages=['active_subspaces', 'active_subspaces.utils'],
install_requires=[
'numpy',
'scipy >= 0.15.0',
'matplotlib'
],
test_suite='nose.collector',
tests_require=['nose'],
include_package_data=True,
zip_safe=False)
| mit |
faner-father/tushare | tushare/datayes/master.py | 17 | 4457 | # -*- coding:utf-8 -*-
"""
DataYes (Tonglian Data) master-data API
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Master():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def SecID(self, assetClass='', cnSpell='', partyID='', ticker='', field=''):
"""
        Look up a security ID (the unique identifier of a security in the data
        structure) by institution partyID, by the pinyin abbreviation of the
        security name (cnSpell), or by trading code (ticker); the ticker can
        likewise be retrieved from cnSpell. Basic listing information of the
        security is returned as well, such as exchange, listing status,
        trading currency and ISIN code.
"""
code, result = self.client.getData(vs.SECID%(assetClass, cnSpell, partyID, ticker, field))
return _ret_data(code, result)
def TradeCal(self, exchangeCD='', beginDate='', endDate='', field=''):
"""
        Records, for each calendar date, whether the exchange is open. Covered
        exchanges include the Shanghai Stock Exchange, Shenzhen Stock Exchange,
        the China interbank market, the Dalian and Zhengzhou commodity
        exchanges, the Shanghai Futures Exchange, the China Financial Futures
        Exchange and the Hong Kong Exchange. For Shanghai and Shenzhen the
        calendar covers every date since the exchanges were founded; data is
        updated on the day a holiday-schedule notice is published.
"""
code, result = self.client.getData(vs.TRADECAL%(exchangeCD, beginDate, endDate, field))
return _ret_data(code, result)
def Industry(self, industryVersion='', industryVersionCD='', industryLevel='', isNew='', field=''):
"""
        Given a DataYes industry-classification code (e.g. 010303 for the 2014
        edition of the Shenwan classification) or the name of a classification
        standard, return the industry breakdown under that standard.
"""
code, result = self.client.getData(vs.INDUSTRY%(industryVersion, industryVersionCD,
industryLevel, isNew, field))
return _ret_data(code, result)
def SecTypeRel(self, secID='', ticker='', typeID='', field=''):
"""
        Lists the constituents of each security category; the categories themselves can be obtained via getSecType.
"""
code, result = self.client.getData(vs.SECTYPEREL%(secID, ticker, typeID, field))
return _ret_data(code, result)
def EquInfo(self, ticker='', field=''):
"""
        Match stock codes and names by pinyin or stock code. Covers all currently listed Shanghai and Shenzhen stocks.
"""
code, result = self.client.getData(vs.EQUINFO%(ticker, field))
return _ret_data(code, result)
def SecTypeRegionRel(self, secID='', ticker='', typeID='', field=''):
"""
        Get the regional classification of Shanghai/Shenzhen stocks, based on the administrative region of the registered address.
"""
code, result = self.client.getData(vs.SECTYPEREGIONREL%(secID, ticker, typeID, field))
return _ret_data(code, result)
def SecType(self, field=''):
"""
        List of security categories. Top-level categories include Shanghai/Shenzhen stocks, Hong Kong stocks, funds, bonds, futures, options, etc., each with further subtypes; all categories can be fetched in a single call.
"""
code, result = self.client.getData(vs.SECTYPE%(field))
return _ret_data(code, result)
def SecTypeRegion(self, field=''):
"""
        Get the regional classification of China, based on administrative divisions.
"""
code, result = self.client.getData(vs.SECTYPEREGION%(field))
return _ret_data(code, result)
def SysCode(self, codeTypeID='', valueCD='', field=''):
"""
        Some API output columns are enumerated codes (e.g. the exchangeCD field returned by getSecID); this interface returns the meaning of every such code.
"""
code, result = self.client.getData(vs.SYSCODE%(codeTypeID, valueCD, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
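# A hedged usage sketch of the Master class. It assumes a valid DataYes token has
# already been stored via tushare.util.upass (up.set_token) and that network
# access is available, so it is not runnable offline; the ticker and field list
# below are illustrative values only.
def _demo_master_secid():
    m = Master()
    return m.SecID(ticker='000001',
                   field='secID,ticker,secShortName,exchangeCD')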
| bsd-3-clause |
admcrae/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions.py | 46 | 15782 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
    current_epoch: Integer, the number of epochs that have been emitted so far.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
  # We may have emitted more data than the requested number of epochs allows; trim the excess.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
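# A hedged walk-through of _get_integer_indices_for_next_batch on a length-5
# array read in batches of 3: the second batch wraps around the end of the array
# and bumps the epoch counter. _demo_next_batch_indices is an illustrative name,
# not part of the original file.
def _demo_next_batch_indices():
  indices, epoch = _get_integer_indices_for_next_batch(
      batch_indices_start=0, batch_size=3, epoch_end=4, array_length=5,
      current_epoch=0, total_epochs=2)
  # indices == [0, 1, 2], epoch == 0 (the epoch end has not been reached yet)
  indices, epoch = _get_integer_indices_for_next_batch(
      batch_indices_start=3, batch_size=3, epoch_end=4, array_length=5,
      current_epoch=epoch, total_epochs=2)
  # indices == [3, 4, 0] (wrap-around), epoch == 1
  return indices, epoch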
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
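# A hedged demo of _ArrayFeedFn. The placeholders are only used as feed-dict
# keys, so plain strings stand in for tf.placeholder tensors in this sketch;
# _demo_array_feed_fn is an illustrative name, not part of the original file.
def _demo_array_feed_fn():
  feeder = _ArrayFeedFn(["index_ph", "value_ph"], np.arange(10, 20),
                        batch_size=4)
  first = feeder()    # {'index_ph': [0, 1, 2, 3], 'value_ph': array([10, 11, 12, 13])}
  second = feeder()   # the next four rows, wrapping around at the end of the array
  return first, second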
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
raise KeyError("key mismatch between dicts emitted by GenFun"
"Expected {} keys; got {}".format(
self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index],
list()).append(data_row[key])
list_dict_size += 1
feed_dict = {key: np.asarray(item) for key, item in list(list_dict.items())}
return feed_dict
def _enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given (`OrderedDict` of) array
or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
`Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
numpy arrays, the first enqueued `Tensor` contains the row number.
Args:
data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
into the queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
Returns:
A queue filled with the rows of the given (`OrderedDict` of) array or
`DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
arrays, a numpy `ndarray`, or a generator producing these.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [
dtypes.as_dtype(col.dtype) for col in data.values()
]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif isinstance(data, tp.FunctionType):
x_first_el = six.next(data())
x_first_keys = sorted(x_first_el.keys())
x_first_values = [x_first_el[key] for key in x_first_keys]
types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
queue_shapes = [col.shape for col in x_first_values]
get_feed_fn = _GeneratorFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [
dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(
capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
runner = fqr._FeedingQueueRunner( # pylint: disable=protected-access
queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
| apache-2.0 |
dsullivan7/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
massmutual/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
fy2462/apollo | modules/tools/mapshow/roadshow.py | 2 | 1345 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import argparse
import matplotlib.pyplot as plt
from map import Map
from localization import Localization
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Raodshow is a tool to display road info on a map.",
prog="roadshow.py")
parser.add_argument(
"-m", "--map", action="store", type=str, required=True,
help="Specify the map file in txt or binary format")
args = parser.parse_args()
map = Map()
map.load(args.map)
map.draw_roads(plt)
plt.axis('equal')
plt.show()
| apache-2.0 |
juliantaylor/scipy | scipy/signal/fir_filter_design.py | 16 | 20091 | """Functions for FIR filter design."""
from __future__ import division, print_function, absolute_import
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
from . import sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez']
# Some notes on function parameters:
#
# `cutoff` and `width` are given as a numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
    numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and
stopband (or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""
Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
ripple in stopband.
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta : float
The beta parameter for the kaiser window.
See Also
--------
kaiser_beta, kaiser_atten
Notes
-----
There are several ways to obtain the Kaiser window:
- ``signal.kaiser(numtaps, beta, sym=0)``
- ``signal.get_window(beta, numtaps)``
- ``signal.get_window(('kaiser', beta), numtaps)``
The empirical equations discovered by Kaiser are used.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
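# A hedged sketch of the Kaiser design workflow: derive numtaps and beta from the
# desired ripple and transition width, then hand them to firwin (defined later in
# this module). The 65 dB ripple, 0.1 transition width and 0.3 cutoff are made-up
# demo values; _demo_kaiserord is an illustrative name.
def _demo_kaiserord():
    numtaps, beta = kaiserord(ripple=65, width=0.1)
    taps = firwin(numtaps, cutoff=0.3, window=('kaiser', beta))
    return numtaps, beta, taps.shape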
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response
filter. The filter will have linear phase; it will be Type I if
`numtaps` is odd and Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
- 0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True)
- `nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e the filter is a single band highpass filter);
center of first passband otherwise
nyq : float
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : (numtaps,) ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
See also
--------
scipy.signal.firwin2
Examples
--------
Low-pass from 0 to f::
>>> from scipy import signal
>>> signal.firwin(numtaps, f)
Use a specific window function::
>>> signal.firwin(numtaps, f, window='nuttall')
High-pass ('stop' from 0 to f)::
>>> signal.firwin(numtaps, f, pass_zero=False)
Band-pass::
>>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
Band-stop::
>>> signal.firwin(numtaps, [f1, f2])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1])::
>>> signal.firwin(numtaps, [f1, f2, f3, f4])
Multi-band (passbands are [f1, f2] and [f3,f4])::
>>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most "
"one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be "
"greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies "
"must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width) / nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
# is even, and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of
# a passband.
bands = cutoff.reshape(-1, 2)
# Build up the coefficients.
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from .signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
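# Concrete, hedged versions of the placeholder calls in the firwin docstring,
# using made-up band edges expressed relative to the Nyquist rate (nyq=1.0);
# _demo_firwin is an illustrative name, not part of the module's API.
def _demo_firwin():
    lowpass = firwin(numtaps=101, cutoff=0.3)
    highpass = firwin(numtaps=101, cutoff=0.4, pass_zero=False)
    bandpass = firwin(numtaps=101, cutoff=[0.2, 0.5], pass_zero=False)
    return lowpass.shape, highpass.shape, bandpass.shape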
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0, antisymmetric=False):
"""
FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`.
freq : array_like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array_like
The filter gains at the frequency sampling points. Certain
constraints to gain values, depending on the filter type, are applied,
see Notes for details.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
antisymmetric : bool
Whether resulting impulse response is symmetric/antisymmetric.
See Notes for more details.
Returns
-------
taps : ndarray
The filter coefficients of the FIR filter, as a 1-D array of length
`numtaps`.
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The type of filter is determined by
    the values of `numtaps` and the `antisymmetric` flag.
There are four possible combinations:
- odd `numtaps`, `antisymmetric` is False, type I filter is produced
- even `numtaps`, `antisymmetric` is False, type II filter is produced
- odd `numtaps`, `antisymmetric` is True, type III filter is produced
- even `numtaps`, `antisymmetric` is True, type IV filter is produced
Magnitude response of all but type I filters are subjects to following
constraints:
- type II -- zero at the Nyquist frequency
- type III -- zero at zero and Nyquist frequencies
- type IV -- zero at zero frequency
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> from scipy import signal
>>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s') %
(numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if antisymmetric:
if numtaps % 2 == 0:
ftype = 4
else:
ftype = 3
else:
if numtaps % 2 == 0:
ftype = 2
else:
ftype = 1
if ftype == 2 and gain[-1] != 0.0:
raise ValueError("A Type II filter must have zero gain at the Nyquist rate.")
elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
raise ValueError("A Type III filter must have zero gain at zero and Nyquist rates.")
elif ftype == 4 and gain[0] != 0.0:
raise ValueError("A Type IV filter must have zero gain at zero rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
if ftype > 2:
shift *= 1j
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from .signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
if ftype == 3:
out[out.size // 2] = 0.0
return out
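# Example sketch of the type constraints enforced above (parameter values are
# hypothetical, chosen only for illustration): a response with non-zero gain
# at the Nyquist frequency needs an odd number of taps (type I), e.g.
#
#     firwin2(151, [0.0, 0.5, 1.0], [0.0, 0.0, 1.0])  # type I, accepted
#     firwin2(150, [0.0, 0.5, 1.0], [0.0, 0.0, 1.0])  # type II -> ValueError,
#                                                     # gain at Nyquist not 0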
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> from scipy import signal
>>> bpass = signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
| bsd-3-clause |
Caoimhinmg/PmagPy | programs/watsons_v.py | 1 | 5216 | #!/usr/bin/env python
from __future__ import print_function
from builtins import input
from builtins import range
import sys
import os
import numpy
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pmagpy.pmagplotlib as pmagplotlib
import pmagpy.pmag as pmag
def main():
"""
NAME
watsons_v.py
DESCRIPTION
calculates Watson's V statistic from input files
INPUT FORMAT
takes dec/inc as first two columns in two space delimited files
SYNTAX
watsons_v.py [command line options]
OPTIONS
-h prints help message and quits
-f FILE (with optional second)
-f2 FILE (second file)
-ant, flip antipodal directions to opposite direction
in first file if only one file or flip all in second, if two files
-P (don't save or show plot)
-sav save figure and quit silently
-fmt [png,svg,eps,pdf,jpg] format for saved figure
OUTPUT
Watson's V and the Monte Carlo Critical Value Vc.
in plot, V is solid and Vc is dashed.
"""
Flip=0
show,plot=1,0
fmt='svg'
file2=""
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
if '-ant' in sys.argv: Flip=1
if '-sav' in sys.argv: show,plot=0,1 # don't display, but do save plot
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
if '-P' in sys.argv: show=0 # don't display or save plot
if '-f' in sys.argv:
ind=sys.argv.index('-f')
file1=sys.argv[ind+1]
data=numpy.loadtxt(file1).transpose()
D1=numpy.array([data[0],data[1]]).transpose()
file1_name=os.path.split(file1)[1].split('.')[0]
else:
print("-f is required")
print(main.__doc__)
sys.exit()
if '-f2' in sys.argv:
ind=sys.argv.index('-f2')
file2=sys.argv[ind+1]
data2=numpy.loadtxt(file2).transpose()
D2=numpy.array([data2[0],data2[1]]).transpose()
file2_name=os.path.split(file2)[1].split('.')[0]
if Flip==1:
D2,D=pmag.flip(D2) # D2 are now flipped
if len(D2)!=0:
if len(D)!=0:
                    D2=numpy.concatenate((D,D2)) # put all in D2
elif len(D)!=0:
D2=D
else:
print('length of second file is zero')
sys.exit()
elif Flip==1:D2,D1=pmag.flip(D1) # peel out antipodal directions, put in D2
#
counter,NumSims=0,5000
#
# first calculate the fisher means and cartesian coordinates of each set of Directions
#
pars_1=pmag.fisher_mean(D1)
pars_2=pmag.fisher_mean(D2)
#
# get V statistic for these
#
V=pmag.vfunc(pars_1,pars_2)
#
# do monte carlo simulation of datasets with same kappas, but common mean
#
Vp=[] # set of Vs from simulations
if show==1:print("Doing ",NumSims," simulations")
for k in range(NumSims):
counter+=1
if counter==50:
if show==1:print(k+1)
counter=0
Dirp=[]
# get a set of N1 fisher distributed vectors with k1, calculate fisher stats
for i in range(pars_1["n"]):
Dirp.append(pmag.fshdev(pars_1["k"]))
pars_p1=pmag.fisher_mean(Dirp)
# get a set of N2 fisher distributed vectors with k2, calculate fisher stats
Dirp=[]
for i in range(pars_2["n"]):
Dirp.append(pmag.fshdev(pars_2["k"]))
pars_p2=pmag.fisher_mean(Dirp)
# get the V for these
Vk=pmag.vfunc(pars_p1,pars_p2)
Vp.append(Vk)
#
# sort the Vs, get Vcrit (95th one)
#
Vp.sort()
k=int(.95*NumSims)
if show==1:
print("Watson's V, Vcrit: ")
print(' %10.1f %10.1f'%(V,Vp[k]))
if show==1 or plot==1:
CDF={'cdf':1}
pmagplotlib.plot_init(CDF['cdf'],5,5)
pmagplotlib.plotCDF(CDF['cdf'],Vp,"Watson's V",'r',"")
pmagplotlib.plotVs(CDF['cdf'],[V],'g','-')
pmagplotlib.plotVs(CDF['cdf'],[Vp[k]],'b','--')
if plot==0:pmagplotlib.drawFIGS(CDF)
files={}
if pmagplotlib.isServer: # use server plot naming convention
if file2!="":
files['cdf']='watsons_v_'+file1+'_'+file2+'.'+fmt
else:
files['cdf']='watsons_v_'+file1+'.'+fmt
else: # use more readable plot naming convention
if file2!="":
files['cdf']='watsons_v_'+file1_name+'_'+file2_name+'.'+fmt
else:
files['cdf']='watsons_v_'+file1_name+'.'+fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles={}
titles['cdf']='Cumulative Distribution'
CDF = pmagplotlib.addBorders(CDF,titles,black,purple)
pmagplotlib.saveP(CDF,files)
elif plot==0:
ans=input(" S[a]ve to save plot, [q]uit without saving: ")
if ans=="a": pmagplotlib.saveP(CDF,files)
if plot==1: # save and quit silently
pmagplotlib.saveP(CDF,files)
if __name__ == "__main__":
main()
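# Example invocation (illustrative only; the file names are hypothetical):
#   watsons_v.py -f site1_dirs.txt -f2 site2_dirs.txt -ant -fmt png
# This prints Watson's V with its Monte Carlo critical value Vc and offers to
# save the CDF plot.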
| bsd-3-clause |
equialgo/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 63 | 3231 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
class_names = iris.target_names
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
blackecho/Deep-Belief-Network | yadlt/core/supervised_model.py | 2 | 3561 | """Supervised Model skeleton."""
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from yadlt.core.model import Model
from yadlt.utils import tf_utils
class SupervisedModel(Model):
    """Supervised Model skeleton.
The interface of the class is sklearn-like.
Methods
-------
* fit(): model training procedure.
* predict(): model inference procedure (predict labels).
* score(): model scoring procedure (mean accuracy).
"""
def __init__(self, name):
"""Constructor."""
Model.__init__(self, name)
def fit(self, train_X, train_Y, val_X=None, val_Y=None, graph=None):
"""Fit the model to the data.
Parameters
----------
train_X : array_like, shape (n_samples, n_features)
Training data.
train_Y : array_like, shape (n_samples, n_classes)
Training labels.
val_X : array_like, shape (N, n_features) optional, (default = None).
Validation data.
val_Y : array_like, shape (N, n_classes) optional, (default = None).
Validation labels.
graph : tf.Graph, optional (default = None)
Tensorflow Graph object.
Returns
-------
"""
if len(train_Y.shape) != 1:
num_classes = train_Y.shape[1]
else:
raise Exception("Please convert the labels with one-hot encoding.")
g = graph if graph is not None else self.tf_graph
with g.as_default():
# Build model
self.build_model(train_X.shape[1], num_classes)
with tf.Session() as self.tf_session:
# Initialize tf stuff
summary_objs = tf_utils.init_tf_ops(self.tf_session)
self.tf_merged_summaries = summary_objs[0]
self.tf_summary_writer = summary_objs[1]
self.tf_saver = summary_objs[2]
# Train model
self._train_model(train_X, train_Y, val_X, val_Y)
# Save model
self.tf_saver.save(self.tf_session, self.model_path)
def predict(self, test_X):
"""Predict the labels for the test set.
Parameters
----------
test_X : array_like, shape (n_samples, n_features)
Test data.
Returns
-------
array_like, shape (n_samples,) : predicted labels.
"""
with self.tf_graph.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
feed = {
self.input_data: test_X,
self.keep_prob: 1
}
return self.mod_y.eval(feed)
def score(self, test_X, test_Y):
"""Compute the mean accuracy over the test set.
Parameters
----------
test_X : array_like, shape (n_samples, n_features)
Test data.
test_Y : array_like, shape (n_samples, n_features)
Test labels.
Returns
-------
float : mean accuracy over the test set
"""
with self.tf_graph.as_default():
with tf.Session() as self.tf_session:
self.tf_saver.restore(self.tf_session, self.model_path)
feed = {
self.input_data: test_X,
self.input_labels: test_Y,
self.keep_prob: 1
}
return self.accuracy.eval(feed)
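# Sketch of the intended sklearn-like call sequence (illustrative only;
# `SomeClassifier` is a hypothetical subclass providing build_model() and
# _train_model(), and the label arrays are assumed to be one-hot encoded):
#   clf = SomeClassifier(name='clf')
#   clf.fit(train_X, train_Y, val_X, val_Y)
#   predictions = clf.predict(test_X)
#   accuracy = clf.score(test_X, test_Y)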
| apache-2.0 |
armbrustlab/seaflowpy | tests/test_sample.py | 1 | 5825 | import datetime
import os
import numpy as np
import pandas as pd
import pytest
import pytz
import seaflowpy as sfp
# pylint: disable=redefined-outer-name
@pytest.fixture()
def tmpout(tmpdir):
"""Setup to test sampling workflow"""
evtpaths = [
"tests/testcruise_evt/2014_185/2014-07-04T00-00-02+00-00",
"tests/testcruise_evt/2014_185/2014-07-04T00-03-02+00-00.gz",
]
file_ids = [
"2014_185/2014-07-04T00-00-02+00-00",
"2014_185/2014-07-04T00-03-02+00-00",
]
dates = [
datetime.datetime.fromisoformat("2014-07-04T00:00:02+00:00"),
datetime.datetime.fromisoformat("2014-07-04T00:03:02+00:00"),
]
dates = [d.replace(tzinfo=pytz.utc) for d in dates]
dates_lookup = dict(zip(file_ids, dates))
return {
"evtpaths": evtpaths,
"file_ids": file_ids,
"tmpdir": str(tmpdir),
"dates": dates_lookup
}
class TestSample:
def test_sample_evt_single(self, tmpout):
outpath = os.path.join(tmpout["tmpdir"], "test.gz")
results, errs = sfp.sample.sample(
tmpout["evtpaths"], 20000, outpath, noise_filter=False, seed=12345
)
assert len(errs) == 0
assert len(results) == 2
assert [r["file_id"] for r in results] == tmpout["file_ids"]
assert [r["events"] for r in results] == [40000, 40000]
assert [r["events_postfilter"] for r in results] == [40000, 40000]
assert [r["events_postsampling"] for r in results] == [10000, 10000]
df = pd.read_parquet(outpath)
assert len(df.index) == 20000
assert len(df[(df["D1"] == 0) & (df["D2"] == 0) & (df["fsc_small"] == 0)]) > 0
def test_sample_evt_single_noise_filter(self, tmpout):
outpath = os.path.join(tmpout["tmpdir"], "test.gz")
results, errs = sfp.sample.sample(
tmpout["evtpaths"], 20000, outpath, noise_filter=True, seed=12345
)
assert len(errs) == 0
assert len(results) == 2
assert [r["file_id"] for r in results] == tmpout["file_ids"]
assert [r["events"] for r in results] == [40000, 40000]
assert [r["events_postfilter"] for r in results] == [39928, 39925]
assert [r["events_postsampling"] for r in results] == [10000, 10000]
df = pd.read_parquet(outpath)
assert len(df.index) == 20000
assert len(df[(df["D1"] == 0) & (df["D2"] == 0) & (df["fsc_small"] == 0)]) == 0
def test_sample_evt_single_min_filter(self, tmpout):
outpath = os.path.join(tmpout["tmpdir"], "test.gz")
results, errs = sfp.sample.sample(
tmpout["evtpaths"],
20000,
outpath,
min_fsc=1,
min_chl=25000,
min_pe=25000,
seed=12345,
)
assert len(errs) == 0
assert len(results) == 2
assert [r["file_id"] for r in results] == tmpout["file_ids"]
assert [r["events"] for r in results] == [40000, 40000]
assert [r["events_postfilter"] for r in results] == [379, 471]
assert [r["events_postsampling"] for r in results] == [379, 471]
df = pd.read_parquet(outpath)
assert len(df.index) == 850
assert np.min(df["fsc_small"]) >= 1
assert np.min(df["pe"]) >= 25000
assert np.min(df["chl_small"]) >= 25000
def test_sample_evt_single_empty(self, tmpout):
outpath = os.path.join(tmpout["tmpdir"], "test.gz")
results, errs = sfp.sample.sample(
tmpout["evtpaths"],
20000,
outpath,
min_fsc=60000,
min_chl=60000,
min_pe=60000,
seed=12345,
)
assert len(errs) == 0
assert len(results) == 2
assert [r["file_id"] for r in results] == tmpout["file_ids"]
assert [r["events"] for r in results] == [40000, 40000]
assert [r["events_postfilter"] for r in results] == [0, 0]
assert [r["events_postsampling"] for r in results] == [0, 0]
df = pd.read_parquet(outpath)
assert len(df.index) == 0
def test_sample_evt_single_dates(self, tmpout):
outpath = os.path.join(tmpout["tmpdir"], "test.gz")
results, errs = sfp.sample.sample(
tmpout["evtpaths"],
20000,
outpath,
dates=tmpout["dates"],
noise_filter=False,
seed=12345
)
assert len(errs) == 0
assert len(results) == 2
assert [r["file_id"] for r in results] == tmpout["file_ids"]
df = pd.read_parquet(outpath)
assert len(df) == 20000
assert len(df["date"].unique()) == 2
assert len(df.head(10000)["date"].unique()) == 1
assert df.head(10000)["date"].unique()[0].isoformat() == "2014-07-04T00:00:02+00:00"
assert df.tail(10000)["date"].unique()[0].isoformat() == "2014-07-04T00:03:02+00:00"
def test_sample_evt_multi(self, tmpout):
outpath = os.path.join(tmpout["tmpdir"], "testdir")
results, errs = sfp.sample.sample(
tmpout["evtpaths"], 20000, outpath, multi=True, seed=12345
)
assert len(errs) == 0
assert len(results) == 2
assert [r["file_id"] for r in results] == tmpout["file_ids"]
assert [r["events"] for r in results] == [40000, 40000]
assert [r["events_postfilter"] for r in results] == [40000, 40000]
assert [r["events_postsampling"] for r in results] == [20000, 20000]
df = pd.read_parquet(outpath)
assert (
len(df[(df["D1"] == 0) & (df["D2"] == 0) & (df["fsc_small"] == 0)]) > 0
)
gb = df.groupby("file_id")
assert gb.ngroups == 2
assert list(gb.groups.keys()) == tmpout["file_ids"]
assert [len(g) for g in gb.groups.values()] == [20000, 20000]
| gpl-3.0 |
pap/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/projections/__init__.py | 69 | 2179 | from geo import AitoffAxes, HammerAxes, LambertAxes
from polar import PolarAxes
from matplotlib import axes
class ProjectionRegistry(object):
"""
Manages the set of projections available to the system.
"""
def __init__(self):
self._all_projection_types = {}
def register(self, *projections):
"""
Register a new set of projection(s).
"""
for projection in projections:
name = projection.name
self._all_projection_types[name] = projection
def get_projection_class(self, name):
"""
Get a projection class from its *name*.
"""
return self._all_projection_types[name]
def get_projection_names(self):
"""
Get a list of the names of all projections currently
registered.
"""
names = self._all_projection_types.keys()
names.sort()
return names
projection_registry = ProjectionRegistry()
projection_registry.register(
axes.Axes,
PolarAxes,
AitoffAxes,
HammerAxes,
LambertAxes)
def register_projection(cls):
projection_registry.register(cls)
def get_projection_class(projection=None):
"""
Get a projection class from its name.
If *projection* is None, a standard rectilinear projection is
returned.
"""
if projection is None:
projection = 'rectilinear'
try:
return projection_registry.get_projection_class(projection)
except KeyError:
raise ValueError("Unknown projection '%s'" % projection)
def projection_factory(projection, figure, rect, **kwargs):
"""
Get a new projection instance.
*projection* is a projection name.
*figure* is a figure to add the axes to.
*rect* is a :class:`~matplotlib.transforms.Bbox` object specifying
the location of the axes within the figure.
Any other kwargs are passed along to the specific projection
constructor being used.
"""
return get_projection_class(projection)(figure, rect, **kwargs)
def get_projection_names():
"""
Get a list of acceptable projection names.
"""
return projection_registry.get_projection_names()
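# Minimal registration sketch (illustrative; ``MyAxes`` is a hypothetical Axes
# subclass defining a class attribute ``name = 'my_proj'``):
#   register_projection(MyAxes)
#   get_projection_class('my_proj')      # -> MyAxes
#   'my_proj' in get_projection_names()  # -> True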
| agpl-3.0 |
rbharath/vs-utils | vs_utils/utils/target_utils.py | 2 | 13957 | """
Utilities for parsing target files from different sources (e.g. PubChem
BioAssay).
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "BSD 3-clause"
from collections import OrderedDict
import gzip
import numpy as np
import pandas as pd
import warnings
from vs_utils.utils import read_pickle, SmilesGenerator
from vs_utils.utils.rdkit_utils import serial
class AssayDataParser(object):
"""
Parse assay data files.
Parameters
----------
data_filename : str
Data filename.
map_filename : str
Compound ID->SMILES map filename.
primary_key : str
Name of column containing compound IDs.
id_prefix : str, optional
Prefix to prepend to compound IDs for mapping compound IDs to SMILES.
activity_key : str, optional
Name of column containing compound activity assignments. Must be
provided if column_indices is None. If both activity_key and
column_indices are set, column_indices will be used.
activity_value : str, optional
Value of positive class in activity_key. For example, 'Active' when
parsing PubChem BioAssay data.
column_indices : list, optional
Data column indices to include. Must be provided if activity_key is
None. If both activity_key and column_indices are set, column_indices
will be used.
delimiter : str, optional (default '\t')
Delimiter to use when parsing data file.
"""
def __init__(self, data_filename, map_filename, primary_key,
id_prefix=None, activity_key=None, activity_value=None,
column_indices=None, delimiter='\t'):
self.data_filename = data_filename
self.map_filename = map_filename
self.primary_key = primary_key
self.id_prefix = id_prefix
if activity_key is None and column_indices is None:
raise ValueError(
'One of activity_key or column_indices must be set.')
if activity_key is not None and activity_value is None:
raise ValueError(
'You must set activity_value when using activity_key.')
self.activity_key = activity_key
self.activity_value = activity_value
if column_indices is not None:
column_indices = np.asarray(column_indices, dtype=int)
self.column_indices = column_indices
self.delimiter = delimiter
def get_targets(self):
"""
Parse data file and return targets and corresponding SMILES.
Procedure
---------
1. Read data and get unique rows by compound ID.
2. Map compound IDs to SMILES.
3. Extract targets from data.
"""
data = self.read_data()
id_map = read_pickle(self.map_filename)
# get compound SMILES from map
# indices are for data rows successfully mapped to SMILES
smiles, indices = self.map_ids_to_smiles(data[self.primary_key],
id_map)
# get targets
if self.column_indices is not None:
targets = np.zeros((data.shape[0], len(self.column_indices)),
dtype=float)
for i, idx in enumerate(self.column_indices):
targets[:, i] = data[data.columns[idx]]
else:
targets = np.asarray(
data[self.activity_key] == self.activity_value)
targets = targets[indices] # reduce targets to matched structures
return smiles, targets
def read_data(self, **kwargs):
"""
Read assay data file.
Parameters
----------
kwargs : dict, optional
Keyword arguments for pd.read_table.
"""
if self.data_filename.endswith('.gz'):
with gzip.open(self.data_filename) as f:
df = pd.read_table(f, sep=self.delimiter, **kwargs)
else:
df = pd.read_table(self.data_filename, sep=self.delimiter,
**kwargs)
df = df.drop_duplicates(self.primary_key) # remove duplicate IDs
return df
def map_ids_to_smiles(self, ids, id_map):
"""
Look up SMILES for compound IDs in a compound ID->SMILES map.
Parameters
----------
ids : array_like
List of compound IDs.
id_map : dict
Compound ID->SMILES map.
"""
smiles = []
indices = []
for i, this_id in enumerate(ids):
if np.isnan(this_id):
continue
try:
this_id = int(this_id) # CIDs are often read in as floats
except ValueError:
pass
if self.id_prefix is not None:
# no bare IDs allowed in maps
this_id = '{}{}'.format(self.id_prefix, this_id)
if this_id in id_map:
smiles.append(id_map[this_id])
indices.append(i)
return np.asarray(smiles), np.asarray(indices)
def get_column_names(self):
"""
Get names of selected data columns.
"""
if self.column_indices is None:
return
names = []
for i in self.column_indices:
names.append(self.read_data().columns[i])
return names
class PcbaParser(AssayDataParser):
"""
Parse PubChem BioAssay (PCBA) target files.
Parameters
----------
data_filename : str
Data filename.
map_filename : str
Compound ID->SMILES map filename.
primary_key : str, optional (default 'PUBCHEM_CID')
Name of column containing compound IDs.
id_prefix : str, optional (default 'CID')
Prefix to prepend to compound IDs for mapping compound IDs to SMILES.
activity_key : str, optional (default 'PUBCHEM_ACTIVITY_OUTCOME')
Name of column containing compound activity assignments. Must be
provided if column_indices is None. If both activity_key and
column_indices are set, column_indices will be used.
activity_value : str, optional (default 'Active')
Value of positive class in activity_key. For example, 'Active' when
parsing PubChem BioAssay data.
column_indices : list, optional
Data column indices to include. Must be provided if activity_key is
None. If both activity_key and column_indices are set, column_indices
will be used.
delimiter : str, optional (default ',')
Delimiter to use when parsing data file.
"""
def __init__(self, data_filename, map_filename, primary_key='PUBCHEM_CID',
id_prefix='CID', activity_key='PUBCHEM_ACTIVITY_OUTCOME',
activity_value='Active', column_indices=None, delimiter=','):
super(PcbaParser, self).__init__(
data_filename, map_filename, primary_key, id_prefix, activity_key,
activity_value, column_indices, delimiter)
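# Illustrative usage sketch (the file names below are hypothetical):
#   parser = PcbaParser('pcba_data.csv.gz', 'cid_to_smiles.pkl')
#   smiles, targets = parser.get_targets()
# Here `smiles` holds the mapped SMILES strings and `targets` is a boolean
# array marking rows whose PUBCHEM_ACTIVITY_OUTCOME equals 'Active'.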
class Nci60Parser(AssayDataParser):
"""
Parse NCI60 target file.
Parameters
----------
data_filename : str
Data filename.
map_filename : str
Compound ID->SMILES map filename.
primary_key : str, optional (default 'NSC')
Name of column containing compound IDs.
id_prefix : str, optional (default 'NSC')
Prefix to prepend to compound IDs for mapping compound IDs to SMILES.
activity_key : str, optional
Name of column containing compound activity assignments. Must be
provided if column_indices is None. If both activity_key and
column_indices are set, column_indices will be used.
activity_value : str, optional
Value of positive class in activity_key. For example, 'Active' when
parsing PubChem BioAssay data.
column_indices : list, optional (default range(4, 64))
Data column indices to include. Must be provided if activity_key is
None. If both activity_key and column_indices are set, column_indices
will be used.
delimiter : str, optional (default '\t')
Delimiter to use when parsing data file.
"""
def __init__(self, data_filename, map_filename, primary_key='NSC',
id_prefix='NSC', activity_key=None, activity_value=None,
column_indices=range(4, 64), delimiter='\t'):
super(Nci60Parser, self).__init__(
data_filename, map_filename, primary_key, id_prefix, activity_key,
activity_value, column_indices, delimiter)
def read_data(self, **kwargs):
"""
Read assay data file.
Parameters
----------
kwargs : dict, optional
Keyword arguments for pd.read_table.
"""
# treat '-' and 'na' values as NaNs
return super(Nci60Parser, self).read_data(na_values=['-', 'na'])
def split_targets(self):
"""
Split targets among different assays.
"""
df = self.read_data()
names = df.columns[self.column_indices]
smiles, targets = self.get_targets()
split_targets = OrderedDict()
for i, name in enumerate(names):
keep = ~np.isnan(targets[:, i])
if not np.count_nonzero(keep):
warnings.warn(
'Assay "{}" has no matching records.'.format(name))
continue
split_targets[name] = {'smiles': smiles[keep],
'targets': targets[keep]}
return split_targets
class Tox21Parser(object):
"""
Parse Tox21 data files.
Parameters
----------
filename : str
Data filename.
merge_strategy : str, optional (default 'max')
Strategy to use when merging targets for duplicated molecules. Choose
from 'max' (active if active in any assay), 'min' (inactive if inactive
in any assay), 'majority_pos' (majority vote with ties assigned
active), or 'majority_neg' (majority vote with ties assigned inactive).
"""
dataset_names = ['NR-AR', 'NR-AhR', 'NR-AR-LBD', 'NR-ER', 'NR-ER-LBD',
'NR-Aromatase', 'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5',
'SR-HSE', 'SR-MMP', 'SR-p53']
def __init__(self, filename, merge_strategy='max'):
self.filename = filename
assert merge_strategy in ['max', 'min', 'majority_pos', 'majority_neg']
self.merge_strategy = merge_strategy
def read_data(self):
"""
Read labeled molecules.
"""
with serial.MolReader().open(self.filename) as reader:
mols = list(reader)
return mols
def read_targets(self):
"""
Get labels for molecules from SD data fields matching dataset names.
Returns
-------
data : dict
Nested dictionary containing SMILES and targets for compounds in
each dataset. Keyed by data->dataset->SMILES->target, where target
is a list.
"""
engine = SmilesGenerator()
data = {dataset: {} for dataset in self.dataset_names}
skipped = []
for mol in self.read_data():
smiles = engine.get_smiles(mol)
for prop in list(mol.GetPropNames()):
if prop in data:
score = int(mol.GetProp(prop))
if smiles not in data[prop]:
data[prop][smiles] = []
data[prop][smiles].append(score)
else: # skip irrelevant SD fields
if prop not in skipped:
skipped.append(prop)
continue
print 'Skipped properties:\n{}'.format('\n'.join(skipped))
return data
def merge_targets(self, data):
"""
Merge labels for duplicate molecules according to a specified merge
        strategy ('max', 'min', 'majority_pos', 'majority_neg').
Parameters
----------
data : dict
Nested dictionary containing SMILES and targets for compounds in
each dataset. Keyed by data->dataset->SMILES->target, where target
is a list.
Returns
-------
data : dict
Nested dictionary containing SMILES and targets for compounds in
each dataset. Keyed by data->dataset->SMILES->target, where target
is an integer.
"""
for dataset in self.dataset_names:
for smiles, targets in data[dataset].items():
targets = np.asarray(targets, dtype=int)
if self.merge_strategy == 'max':
data[dataset][smiles] = max(targets)
elif self.merge_strategy == 'min':
data[dataset][smiles] = min(targets)
# 0.5 rounds down
elif self.merge_strategy == 'majority_neg':
data[dataset][smiles] = int(np.round(np.mean(targets)))
# 0.5 rounds up
elif self.merge_strategy == 'majority_pos':
data[dataset][smiles] = (int(np.round(
np.mean(targets) + 1)) - 1)
return data
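    # Worked example of the tie-breaking arithmetic above (illustrative): for
    # duplicate scores [0, 1] the mean is 0.5, so
    #   'majority_neg' -> int(np.round(0.5)) = 0          (tie -> inactive)
    #   'majority_pos' -> int(np.round(0.5 + 1)) - 1 = 1  (tie -> active)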
def get_targets(self):
"""
Get SMILES and targets for each Tox21 dataset.
"""
split_targets = {}
data = self.merge_targets(self.read_targets())
for dataset in data:
if not len(data[dataset]):
warnings.warn('Dataset "{}" is empty'.format(dataset))
continue
smiles, targets = [], []
for this_smiles, target in data[dataset].items():
smiles.append(this_smiles)
targets.append(target)
split_targets[dataset] = {'smiles': np.asarray(smiles),
'targets': np.asarray(
targets, dtype=int)}
return split_targets
| gpl-3.0 |
stevemarple/AuroraWatchNet | software/server/bin/make_awn_plots.py | 1 | 33954 | #!/usr/bin/env python
import logging
logger = logging.getLogger(__name__)
import argparse
import copy
import io
import os
import sys
import time
import traceback
import pyexiv2
import numpy as np
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
mpl.use('Agg')
import matplotlib.pyplot as plt
import auroraplot as ap
import auroraplot.dt64tools as dt64
import auroraplot.magdata
import auroraplot.tools
import auroraplot.auroralactivity
import auroraplot.datasets.aurorawatchnet
import auroraplot.datasets.samnet
import auroraplot.datasets.dtu
import auroraplot.datasets.uit
# Set timezone appropriately to get intended np.datetime64 behaviour.
os.environ['TZ'] = 'UTC'
try:
time.tzset()
except Exception as e:
# Reminder that windows systems can't use tzset
logging.warning('Could not set timezone')
mpl.rcParams['figure.facecolor'] = 'w'
mpl.rcParams['legend.fontsize'] = 'medium'
def parse_datetime(s):
# Parse datetime relative to 'now' variable, which in test mode
# may not be the current time.
if s == 'tomorrow':
return tomorrow
elif s == 'now':
return now
elif s == 'today':
return today
elif s == 'yesterday':
return yesterday
else:
return np.datetime64(s).astype('M8[us]')
def date_generator():
t1 = start_time
while t1 < end_time:
t2 = t1 + day
yield t1, t2, False
t1 = t2
if args.rolling:
# Rolling ought to produce current day too
if t1 != dt64.floor(now, day):
t1 = dt64.floor(now, day)
t2 = t1 + day
yield t1, t2, False
t2 = dt64.ceil(now, np.timedelta64(1, 'h'))
t1 = t2 - day
yield t1, t2, True
def my_load_data(project, site, data_type, start_time, end_time, **kwargs):
if data_type not in ap.projects[project][site]['data_types']:
return None
r = ap.load_data(project, site, data_type, start_time, end_time, **kwargs)
if r is not None and args.test_mode:
# Remove any data after 'now' to emulate the correct behaviour
# when using historical data.
r.data[:,r.sample_end_time > now] = np.nan
return r
load_data = ap.load_data
def load_mag_data(project, site, start_time, end_time, **kwargs):
if 'MagData' not in ap.projects[project][site]['data_types']:
return None
# Load data day by day so that a memoize function can be used to
# cache daily values
mdl = []
t1 = start_time
while t1 < end_time:
t2 = t1 + day
md = load_data(project, site, 'MagData', t1, t2, **kwargs)
if md is not None:
# Ensure data gaps are marked as such in the plots. Straight lines
# across large gaps look bad!
mdl.append(md.mark_missing_data(
cadence=2*md.nominal_cadence))
t1 = t2
if len(mdl) == 0:
return None
r = ap.concatenate(mdl)
r.start_time = start_time
r.end_time = end_time
if args.test_mode:
# Remove any data after 'now' to emulate the correct behaviour
# when using historical data.
r.data[:,r.sample_end_time > now] = np.nan
return r
def compute_mag_qdc_t(st):
'''Compute QDC time.
For the 4th or later in the month load the previous
month, otherwise go back two months. This gives a few days for
data to be transferred and QDCs to be made and checked.'''
qdc_t = dt64.get_start_of_previous_month(st)
if dt64.get_day_of_month(st) < 4:
qdc_t = dt64.get_start_of_previous_month(qdc_t)
return qdc_t
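# Example of the rule above (illustrative dates): st = 2014-03-10 yields a QDC
# month of February 2014, while st = 2014-03-02 (before the 4th) steps back a
# further month to January 2014.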
def fit_qdc(mag_qdc, fit_data, mag_data, cadence=np.timedelta64(1,'m')):
'''Fit QDC to data.
mag_qdc: QDC to be fitted.
fit_data: previous interval to which QDC should be fitted.
mag_data: the magnetometer data of interest.
cadence: cadence of return value
Fits QDC data by first fitting (DC shift) to fit_data. Then
interpolates to produce a expected QDC values at given cadence.
'''
num = ((mag_data.end_time - mag_data.start_time)/ cadence) + 1
# qdc_sample_times = np.linspace(fit_data.start_time.astype('M8[m]'),
# fit_data.end_time.astype('M8[m]'),
# num)
#qdc_sample_times = (mag_data.start_time +
# (np.arange(num) * qdc_cadence))
qdc_aligned = None
errors = [0.0] * len(mag_data.channels)
if mag_qdc is not None and fit_data is not None:
try:
# Fit to previous days to find adjustment
qdc_aligned, errors, fi = \
mag_qdc.align(fit_data,
fit=ap.data.Data.minimise_sign_error_fit,
plot_fit=args.plot_fit,
full_output=True)
# Apply same error adjustment to the QDC
mag_qdc.data = (mag_qdc.data.T - errors).T
return qdc_aligned, errors, fi
except Exception as e:
logger.error(traceback.format_exc())
return None, None, None
def mysavefig(fig, filename, exif_tags=None):
global args
path = os.path.dirname(filename)
if not os.path.exists(path):
os.makedirs(path)
fig.axes[-1].set_xlabel('Time (UT)')
# Override labelling format
for ax in fig.axes:
ax.grid(True)
ax.xaxis.set_major_formatter(dt64.Datetime64Formatter(fmt='%H'))
if np.diff(ax.get_xlim()).astype('m8[' + dt64.get_plot_units(ax.xaxis) \
+ ']') == np.timedelta64(24, 'h'):
ax.xaxis.set_major_locator(\
dt64.Datetime64Locator(interval=np.timedelta64(3, 'h'),
maxticks=10))
# TO DO: Update all site information with correct copyright,
# license and attribution data. Temporarily set here as currently
# all are CC4 BY-NC-SA.
if exif_tags is None:
exif_tags = {
'Exif.Image.Copyright': \
'This work is licensed under the Creative Commons ' + \
'Attribution-NonCommercial-ShareAlike 4.0 Unported ' + \
'License. To view a copy of this license, visit ' + \
'http://creativecommons.org/licenses/by-nc-sa/4.0/'
}
if exif_tags is None or len(exif_tags) == 0:
# Can save directly to a file
        fig.savefig(filename, dpi=80)
else:
# Save the figure to a buffer which is used to create a
# pyexiv2 object.
image_format = filename[(filename.rindex('.') + 1):]
buf = io.BytesIO()
fig.savefig(buf, dpi=80, format=image_format)
buf.seek(0)
metadata = pyexiv2.ImageMetadata.from_buffer(buf.getvalue())
metadata.read()
# Add the metadata. pyexiv2 only supports a few tags
for k in exif_tags:
metadata[k] = exif_tags[k]
metadata.write()
f = open(filename, 'wb') # Open the file originally specified
f.write(metadata.buffer) # Finally write to disk
f.close()
buf.close()
logger.info('saved to ' + filename)
if not args.show:
plt.close('all') # Close to save memory
def has_data_of_type(project, site, data_type):
return ap.projects.has_key(project) \
and ap.projects[project].has_key(site) \
and ap.projects[project][site]['data_types'].has_key(data_type)
def round_to(a, b, func=np.round):
return func(a / b) * b
def activity_plot(mag_data, mag_qdc, filename, exif_tags,
k_index_filename=None):
channel = mag_data.channels[0]
pos = [0.15, 0.1, 0.775, 0.75]
if mag_qdc is None:
activity = None
mag_data.plot(label=channel, color='black')
fig = plt.gcf()
ax2 = plt.gca()
else:
channel = mag_data.channels[0]
activity = ap.auroralactivity.AuroraWatchActivity(magdata=mag_data,
magqdc=mag_qdc,
fit=None)
# To get another axes the position must be different. It is made
# the same position later.
pos2 = copy.copy(pos)
pos2[0] += 0.1
fig = plt.figure()
ax = plt.axes(pos)
activity.plot(axes=ax, units_prefix='n',
label='Activity (' + channel + ')')
ax2 = plt.axes(pos2)
# Set Y limit to be 1.5 times highest threshold. Units are
# nanotesla since that was set when plotting.
ax.set_ylim(0, activity.thresholds[-1] * 1.5 * 1e9)
mag_data.plot(label=channel, color='black',axes=ax2)
# Align the QDC to regular intervals between start and end times
qdc_cadence = np.timedelta64(1, 'm')
num = ((mag_data.end_time - mag_data.start_time)/ qdc_cadence) + 1
qdc_sample_times = np.linspace(mag_data.start_time.astype('M8[m]'),
mag_data.end_time.astype('M8[m]'),
num)
qdc_aligned = mag_qdc.align(qdc_sample_times)
qdc_aligned.plot(label=channel + ' QDC', color='cyan', axes=ax2)
ax.set_axis_bgcolor('w')
ax.axison = False
ax2.set_title(activity.make_title())
ax2.set_axis_bgcolor('none')
ax2.set_position(pos)
# min_ylim_range = 400
if activity:
min_ylim_range = activity.thresholds[-1] * 1.5 * 1e9
ax2_ylim = ax2.get_ylim()
if np.diff(ax2_ylim) < min_ylim_range:
ax2.set_ylim(round_to(np.mean(ax2_ylim), 50)
+ min_ylim_range * np.array([-0.5, 0.5]))
fig.set_figwidth(6.4)
fig.set_figheight(4.8)
mysavefig(fig, filename, exif_tags)
return activity
def k_index_plot(mag_data, mag_qdc, filename, exif_tags):
md_filt = mag_data
if ap.has_site_info(mag_data.project, mag_data.site,
'k_index_filter'):
kfilt = ap.get_site_info(mag_data.project, mag_data.site,
'k_index_filter')
if kfilt is not None:
md_filt = kfilt(mag_data)
k_index = ap.auroralactivity.KIndex(magdata=md_filt, magqdc=mag_qdc)
# Fix the start/end times to the data, not the 3h K index samples
k_index.start_time = md_filt.start_time
k_index.end_time = md_filt.end_time
k_index.plot()
fig = plt.gcf()
fig.set_figwidth(6.4)
fig.set_figheight(4.8)
fig.subplots_adjust(bottom=0.1, top=0.85,
left=0.15, right=0.925)
mysavefig(fig, filename, exif_tags)
def make_temperature_plot(temperature_data, filename, exif_tags):
temperature_data.plot()
fig = plt.gcf()
ax = plt.gca()
fig.set_figwidth(6.4)
fig.set_figheight(3)
fig.subplots_adjust(bottom=0.175, top=0.75,
left=0.15, right=0.925)
leg = plt.legend()
leg.get_frame().set_alpha(0.5)
mysavefig(fig, filename, exif_tags)
def make_voltage_plot(voltage_data, filename, exif_tags):
voltage_data.plot()
fig = plt.gcf()
ax = plt.gca()
# ax.set_ylim([1.5, 3.5])
fig.set_figwidth(6.4)
fig.set_figheight(3)
fig.subplots_adjust(bottom=0.175, top=0.75,
left=0.15, right=0.925)
mysavefig(fig, filename, exif_tags)
def make_stack_plot(mdl, filename, exif_tags):
ap.magdata.stack_plot(mdl, offset=100e-9)
fig = plt.gcf()
ax = plt.gca()
ax.grid(True)
fig.subplots_adjust(left=0.15, right=0.925)
mysavefig(fig, filename, exif_tags)
def combined_activity_plot(act, filename, exif_tags):
'''
act: list of AuroraWatchActivity objects
filename: filename for plot
exif_tags: dict of tags to add to image
returns: None
'''
# Calculate activity as proportion of amber alert
act_data = np.concatenate(map(lambda d: d.data / d.thresholds[2], act))
act_data[np.isnan(act_data)] = 0
if act_data.shape[0] == 2:
# When only two sites use lowest activity values
data = np.min(act_data, axis=0)
else:
data = np.median(act_data, axis=0)
activity_data = copy.deepcopy(act[0])
activity_data.project = 'AuroraWatch'
activity_data.site = 'UK'
# Set specific thresholds, and convert data from proportion of
# amber threshold
activity_data.data = np.array([data]) * 100e-9
activity_data.thresholds = np.array([0.0, 50e-9, 100e-9, 200e-9])
activity_data.units = 'T'
activity_data.plot(units_prefix='n')
fig = plt.gcf()
ax = plt.gca()
ax.set_ylabel('Activity (nT)')
ax.set_title('AuroraWatch UK\nAverage geomagnetic activity\n' +
dt64.fmt_dt64_range(activity_data.start_time,
activity_data.end_time))
ax.grid(True)
# Set Y limit to be 1.5 times highest threshold. Units are
# nanotesla since that was set when plotting.
    ax.set_ylim(0, activity_data.thresholds[-1] * 1.5 * 1e9)
fig.set_figwidth(6.4)
fig.set_figheight(4.8)
fig.subplots_adjust(bottom=0.1, top=0.85,
left=0.15, right=0.925)
mysavefig(fig, filename, exif_tags)
def make_links(link_dir, link_data):
for link in link_data:
link_name = os.path.join(link_dir, link['name'])
# Make the target a relative path
target = os.path.relpath(dt64.strftime(link['date'], link['fstr']),
os.path.dirname(link_name))
if os.path.islink(link_name) and \
os.readlink(link_name) == target:
# Exists and is correct
logger.debug('link exists and is correct: ' + link_name +
' -> ' + target)
continue
if os.path.lexists(link_name):
logger.debug('link exists but is incorrect: ' + link_name)
os.unlink(link_name)
logger.debug('creating link ' + link_name + ' -> ' + target)
os.symlink(target, link_name)
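# For example (hypothetical paths): with link_dir '/summary/awn/lan1' and an
# entry {'name': 'today.png', 'date': today, 'fstr': '.../%Y/%m/lan1_%Y%m%d.png'}
# this creates or refreshes a relative symlink such as
# today.png -> 2014/07/lan1_20140704.png.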
# TODO: put in a common location and merge with aurorawatch_jobs.touch_file
def touch_file(filename, amtime=None):
basedir = os.path.dirname(filename)
if not os.path.exists(basedir):
os.makedirs(basedir)
with open(filename, 'a'):
os.utime(filename, amtime)
def clear_timeouts(status_dir):
if os.path.exists(status_dir):
for filename in os.listdir(status_dir):
# Set times back to 1970
touch_file(os.path.join(status_dir, filename), (0, 0))
cc4_by_nc_sa = 'This work is licensed under the Creative Commons ' + \
'Attribution-NonCommercial-ShareAlike 4.0 Unported License. ' + \
'To view a copy of this license, visit ' + \
'http://creativecommons.org/licenses/by-nc-sa/4.0/'
# ==========================================================================
# Parse command line options
parser = argparse.ArgumentParser(description\
='Plot AuroraWatch magnetometer data.')
parser.add_argument('-s', '--start-time',
help='Start time for archive plot mode',
metavar='DATETIME')
parser.add_argument('-e', '--end-time',
help='End time for archive plot mode',
metavar='DATETIME')
parser.add_argument('--now',
help='Set current time for test mode',
metavar='DATETIME')
parser.add_argument('--log-level',
choices=['debug', 'info', 'warning', 'error', 'critical'],
default='warning',
help='Control how much details is printed',
metavar='LEVEL')
parser.add_argument('--log-format',
default='%(levelname)s:%(message)s',
help='Set format of log messages',
metavar='FORMAT')
parser.add_argument('--cache',
nargs='?', # Option has optional value
type=int,
default=0, # No option provided
const=-1, # Option provided but no value (-1=autocompute)
help='Cache loading of data',
metavar='DAYS_TO_CACHE')
parser.add_argument('-m', '--make-links',
action='store_true',
help='Make symbolic links')
parser.add_argument('--qdc-tries',
default=6,
type=int,
help='Number of tries to load QDC',
metavar='NUM')
parser.add_argument('--qdc-fit-days',
default=3,
type=int,
help='Number of days used to fit QDC',
metavar='NUM')
parser.add_argument('--rolling',
action='store_true',
help='Make rolling plots for today (live mode)')
parser.add_argument('--test-mode',
action='store_true',
help='Test mode for plots and jobs')
parser.add_argument('--clear-timeouts',
action='store_true',
help='Mark jobs as not having run for a very long time')
parser.add_argument('--ignore-timeout',
action='store_true',
help='Ignore timeout when running jobs')
parser.add_argument('project_site',
nargs='+',
metavar="PROJECT[/SITE]")
parser.add_argument('--summary-dir',
default='/tmp',
help='Base directory for summary plots',
metavar='PATH')
parser.add_argument('--plot-fit',
action='store_true',
help='Plot and save QDC fit')
parser.add_argument('--show',
action='store_true',
help='Show plots for final day')
parser.add_argument('--stack-plot',
action='store_true',
help='Generate stack plot(s)')
parser.add_argument('--run-jobs',
action='store_true',
help='Run jobs')
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log_level.upper()),
format=args.log_format)
# Use a consistent value for current time, process any --now option
# before other time-dependent options.
if args.now:
now = parse_datetime(args.now)
else:
now = np.datetime64('now', 'us')
day = np.timedelta64(24, 'h')
today = dt64.floor(now, day)
yesterday = today - day
tomorrow = today + day
if args.start_time is None:
start_time = today
else:
start_time = dt64.floor(parse_datetime(args.start_time), day)
if args.end_time is None:
end_time = start_time + day
else:
end_time = dt64.floor(parse_datetime(args.end_time), day)
aurorawatch_jobs = None
if args.run_jobs:
try:
import aurorawatch_jobs
except ImportError as e:
logger.error('Failed to import aurorawatch_jobs')
logger.error(traceback.format_exc())
args.run_jobs = False
if args.test_mode:
summary_dir = os.path.join(args.summary_dir, test_mode_str)
else:
summary_dir = args.summary_dir
if args.clear_timeouts:
clear_timeouts(os.path.join(summary_dir, 'job_status'))
project_list, site_list = ap.parse_project_site_list(args.project_site)
if args.cache != 0:
try:
import cachetools
import auroraplot.utils
if args.cache == -1:
cache_size = ((1 + args.qdc_fit_days) * len(site_list)) + 1
else:
cache_size = args.cache
logger.debug('Cache %d MagData items' % cache_size)
load_data = ap.utils.CachedFunc(ap.load_data,
cache_class=cachetools.LRUCache,
maxsize=cache_size)
except ImportError:
logger.error('Failed to configure cache')
logger.error(traceback.format_exc())
# t1 = start_time
# while t1 < end_time:
# Iterate over the list of days to process. If rolling plots were
# specified the last item will be start/end times for the rolling
# plot.
for t1, t2, rolling in date_generator():
t1_sod = dt64.floor(t1, day)
plt.close('all')
### DEBUG: Phase these out
t1_eod = dt64.ceil(t1, day) # t1 end of day
t2_eod = dt64.ceil(t2, day) # t2 end of day
# List of magdata objects for this 24 hour period
mag_data_list = []
activity_data_list = []
# Get copyright and attribution data for all sites. Licenses had
# better be compatible (or we have express permission) since we
# are combining them.
copyright_list = []
attribution_list = []
for site_num in range(len(site_list)):
project_uc = project_list[site_num]
project_lc = project_uc.lower()
site_uc = site_list[site_num]
site_lc = site_uc.lower()
logger.debug('Processing %s/%s' % (project_uc, site_uc))
# Ignore this 24 hour period if outside the site's listed
# operational period
site_start_time = ap.get_site_info(project_uc, site_uc,
info='start_time')
site_end_time = ap.get_site_info(project_uc, site_uc,
info='end_time')
if ((site_start_time and t2 <= site_start_time) or
(site_end_time and t1 >= site_end_time)):
continue
copyright_ = ap.get_site_info(project_uc, site_uc, 'copyright')
attribution = ap.get_site_info(project_uc, site_uc, 'attribution')
exif_tags = {'Exif.Image.Copyright': \
' '.join(['Copyright: ' + copyright_,
'License: ' + \
ap.get_site_info(project_uc,
site_uc,
'license'),
'Attribution: ' + attribution])}
site_summary_dir = os.path.join(summary_dir,
project_lc, site_lc)
site_status_dir = os.path.join(site_summary_dir, 'job_status')
if rolling:
# Rolling plots should have fixed (not time-dependent)
# filenames
mag_plot_filename = os.path.join(site_summary_dir, 'rolling.png')
k_filename = os.path.join(site_summary_dir, 'rolling_k.png')
temp_plot_filename = os.path.join(site_summary_dir,
'rolling_temp.png')
volt_plot_filename = os.path.join(site_summary_dir,
'rolling_volt.png')
stackplot_filename = os.path.join(summary_dir,
'stackplots', 'rolling.png')
activity_plot_filename = os.path.join(summary_dir,
'activity_plots',
'rolling.png')
else:
mag_plot_filename = \
dt64.strftime(t1,
os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_%Y%m%d.png'))
qdc_fit_filename = \
dt64.strftime(t1,
os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_%Y%m%d_fit.png'))
k_filename = \
dt64.strftime(t1,
os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_k_%Y%m%d.png'))
stackplot_filename = \
dt64.strftime(t1,
os.path.join(summary_dir, 'stackplots',
'%Y', '%m', '%Y%m%d.png'))
activity_plot_filename = \
dt64.strftime(t1,
os.path.join(summary_dir, 'activity_plots',
'%Y', '%m', '%Y%m%d.png'))
temp_plot_filename = \
dt64.strftime(t1,
os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_temp_%Y%m%d.png'))
volt_plot_filename = \
dt64.strftime(t1,
os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_voltage_%Y%m%d.png'))
if args.clear_timeouts and t1 == start_time:
clear_timeouts(site_status_dir)
# Load magnetometer data and QDC
mag_data = None
mag_qdc = None
activity = None
### qdc_aligned = None
try:
mag_data = load_mag_data(project_uc, site_uc, t1, t2)
logger.debug(mag_data)
if mag_data is not None:
# Store copyright and attribution. Used later in stackplots
mag_data.copyright = copyright_
mag_data.attribution = attribution
mag_data_list.append(mag_data)
mag_qdc = ap.magdata.load_qdc(project_uc,
site_uc,
compute_mag_qdc_t(t1),
tries=args.qdc_tries)
if mag_qdc is not None:
# Try fitting QDC to previous 3 days of data
mag_data_prev = load_mag_data(project_uc,
site_uc,
t1_sod - (3*day),
t1_sod)
if mag_data_prev is not None:
fitted_qdc, errors, fi \
= fit_qdc(mag_qdc, mag_data_prev, mag_data)
if args.plot_fit and not rolling:
fig = plt.gcf()
fig.set_figwidth(6.4)
fig.set_figheight(4.8)
fig.subplots_adjust(bottom=0.1, top=0.85,
left=0.15, right=0.925)
mysavefig(fig, qdc_fit_filename, exif_tags)
# Standard AuroraWatch UK activity plot
activity = activity_plot(mag_data, mag_qdc,
mag_plot_filename,
exif_tags)
if activity is not None:
activity.copyright = copyright_
activity.attribution = attribution
activity_data_list.append(activity)
# Local K-index plot
k_index_plot(mag_data, mag_qdc, k_filename, exif_tags)
except Exception as e:
logger.error(traceback.format_exc())
temp_data = None
try:
if has_data_of_type(project_uc, site_uc, 'TemperatureData'):
temp_data = my_load_data(project_uc, site_uc,
'TemperatureData',
t1, t2)
if temp_data is not None:
temp_data.set_cadence(np.timedelta64(5, 'm'),
inplace=True)
make_temperature_plot(temp_data, temp_plot_filename,
exif_tags)
except Exception as e:
logger.error(traceback.format_exc())
voltage_data = None
try:
if has_data_of_type(project_uc, site_uc, 'VoltageData'):
voltage_data = my_load_data(project_uc, site_uc,
'VoltageData',
t1, t2)
if (voltage_data is not None
and not np.all(np.isnan(voltage_data.data))):
voltage_data.set_cadence(np.timedelta64(5, 'm'),
inplace=True)
make_voltage_plot(voltage_data, volt_plot_filename,
exif_tags)
except Exception as e:
logger.error(traceback.format_exc())
if rolling and args.run_jobs:
# Jobs are only run for rolling (live) mode.
try:
logger.info('Running site job for ' + project_uc + '/' \
+ site_uc)
aurorawatch_jobs.site_job(project_uc,
site_uc,
now,
site_status_dir,
args.test_mode,
args.ignore_timeout,
mag_data,
None if mag_data is None \
else activity,
temp_data,
voltage_data)
except Exception as e:
logger.error('Could not run job for ' + project_uc + '/' +
site_uc + ': ' + str(e))
traceback.format_exc()
if args.stack_plot and len(mag_data_list):
try:
site_ca = [] # site copyright/attribution details
for m in mag_data_list:
site_ca.append(m.project + '/' + m.site +
' data: ' +
' Copyright: ' + m.copyright +
' Attribution: ' + m.attribution)
exif_tags2 = {'Exif.Image.Copyright': \
' | '.join(site_ca) + ' | License: ' \
+ cc4_by_nc_sa}
make_stack_plot(mag_data_list, stackplot_filename, exif_tags2)
combined_activity_plot(activity_data_list, activity_plot_filename,
exif_tags2)
except Exception as e:
logger.error(traceback.format_exc())
if rolling and args.run_jobs:
try:
logger.info('Running activity job')
status_dir = os.path.join(summary_dir, 'job_status')
aurorawatch_jobs.activity_job(combined_activity,
activity_data_list,
now,
status_dir,
args.test_mode,
args.ignore_timeout,)
except Exception as e:
logger.error('Could not run activity job: ' + str(e))
raise
# End of time loop
if args.make_links:
logger.debug('Making links')
# Makes site links for each site listed
    for project_uc, site_uc in zip(project_list, site_list):
site_lc = site_uc.lower()
site_summary_dir = os.path.join(summary_dir,
project_uc.lower(), site_lc)
mag_fstr = os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_%Y%m%d.png')
temp_fstr = os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_temp_%Y%m%d.png')
voltage_fstr = os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_voltage_%Y%m%d.png')
k_fstr = os.path.join(site_summary_dir, '%Y', '%m',
site_lc + '_k_%Y%m%d.png')
link_data = [{'name': 'yesterday.png',
'date': yesterday,
'fstr': mag_fstr},
{'name': 'yesterday_temp.png',
'date': yesterday,
'fstr': temp_fstr},
{'name': 'yesterday_volt.png',
'date': yesterday,
'fstr': voltage_fstr},
{'name': 'yesterday_k.png',
'date': yesterday,
'fstr': k_fstr},
{'name': 'today.png',
'date': today,
'fstr': mag_fstr},
{'name': 'today_temp.png',
'date': today,
'fstr': temp_fstr},
{'name': 'today_volt.png',
'date': today,
'fstr': voltage_fstr},
{'name': 'today_k.png',
'date': today,
'fstr': k_fstr},
]
make_links(site_summary_dir, link_data)
    # Stack plots and combined activity links use different base
    # directories; rebuild the filename patterns used when the plots were
    # saved so that the links can be resolved.
    stackplot_fstr = os.path.join(summary_dir, 'stackplots',
                                  '%Y', '%m', '%Y%m%d.png')
    actplot_fstr = os.path.join(summary_dir, 'activity_plots',
                                '%Y', '%m', '%Y%m%d.png')
    make_links(os.path.join(summary_dir, 'stackplots'),
[{'name': 'yesterday.png',
'date': yesterday,
'fstr': stackplot_fstr},
{'name': 'today.png',
'date': today,
'fstr': stackplot_fstr}])
make_links(os.path.join(summary_dir, 'activity_plots'),
[{'name': 'yesterday.png',
'date': yesterday,
'fstr': actplot_fstr},
{'name': 'today.png',
'date': today,
'fstr': actplot_fstr}])
if args.show:
plt.show()
| gpl-2.0 |
sealhuang/brainDecodingToolbox | braincode/util/colorwheel.py | 3 | 1122 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
fig = plt.figure()
display_axes = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection='polar')
# This is a nasty hack - using the hidden field to multiply the values
# such that 1 become 2*pi, this field is supposed to take values 1 or -1 only!!
display_axes._direction = 2*np.pi
norm = mpl.colors.Normalize(0.0, 2*np.pi)
# Plot the colorbar onto the polar axis
# note - use orientation horizontal so that the gradient goes around
# the wheel rather than centre out
quant_steps = 2056
cb = mpl.colorbar.ColorbarBase(display_axes,
cmap=cm.get_cmap('RdBu_r', quant_steps),
norm=norm,
orientation='horizontal')
# aesthetics - get rid of border and axis labels
cb.outline.set_visible(False)
display_axes.set_axis_off()
# Replace with plt.savefig if you want to save a file
#plt.show()
plt.savefig('colorwheel.png')
| bsd-3-clause |
CallaJun/hackprince | indico/mpl_toolkits/tests/test_mplot3d.py | 7 | 6822 | from mpl_toolkits.mplot3d import Axes3D, axes3d
from matplotlib import cm
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import numpy as np
@image_comparison(baseline_images=['bar3d'], remove_text=True)
def test_bar3d():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for c, z in zip(['r', 'g', 'b', 'y'], [30, 20, 10, 0]):
xs = np.arange(20)
ys = np.arange(20)
cs = [c] * len(xs)
cs[0] = 'c'
ax.bar(xs, ys, zs=z, zdir='y', color=cs, alpha=0.8)
@image_comparison(baseline_images=['contour3d'], remove_text=True)
def test_contour3d():
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
cset = ax.contour(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)
ax.set_xlim(-40, 40)
ax.set_ylim(-40, 40)
ax.set_zlim(-100, 100)
@image_comparison(baseline_images=['contourf3d'], remove_text=True)
def test_contourf3d():
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
cset = ax.contourf(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
cset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
cset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)
ax.set_xlim(-40, 40)
ax.set_ylim(-40, 40)
ax.set_zlim(-100, 100)
@image_comparison(baseline_images=['lines3d'], remove_text=True)
def test_lines3d():
fig = plt.figure()
ax = fig.gca(projection='3d')
theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
z = np.linspace(-2, 2, 100)
r = z ** 2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
ax.plot(x, y, z)
@image_comparison(baseline_images=['mixedsubplot'], remove_text=True)
def test_mixedsubplots():
def f(t):
s1 = np.cos(2*np.pi*t)
e1 = np.exp(-t)
return np.multiply(s1, e1)
t1 = np.arange(0.0, 5.0, 0.1)
t2 = np.arange(0.0, 5.0, 0.02)
fig = plt.figure(figsize=plt.figaspect(2.))
ax = fig.add_subplot(2, 1, 1)
l = ax.plot(t1, f(t1), 'bo',
t2, f(t2), 'k--', markerfacecolor='green')
ax.grid(True)
ax = fig.add_subplot(2, 1, 2, projection='3d')
X, Y = np.meshgrid(np.arange(-5, 5, 0.25), np.arange(-5, 5, 0.25))
R = np.sqrt(X ** 2 + Y ** 2)
Z = np.sin(R)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
linewidth=0, antialiased=False)
ax.set_zlim3d(-1, 1)
@image_comparison(baseline_images=['scatter3d'], remove_text=True)
def test_scatter3d():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(np.arange(10), np.arange(10), np.arange(10),
c='r', marker='o')
ax.scatter(np.arange(10, 20), np.arange(10, 20), np.arange(10, 20),
c='b', marker='^')
@image_comparison(baseline_images=['surface3d'], remove_text=True)
def test_surface3d():
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 5, 0.25)
X, Y = np.meshgrid(X, Y)
R = np.sqrt(X ** 2 + Y ** 2)
Z = np.sin(R)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
lw=0, antialiased=False)
ax.set_zlim(-1.01, 1.01)
fig.colorbar(surf, shrink=0.5, aspect=5)
@image_comparison(baseline_images=['text3d'])
def test_text3d():
fig = plt.figure()
ax = fig.gca(projection='3d')
zdirs = (None, 'x', 'y', 'z', (1, 1, 0), (1, 1, 1))
xs = (2, 6, 4, 9, 7, 2)
ys = (6, 4, 8, 7, 2, 2)
zs = (4, 2, 5, 6, 1, 7)
for zdir, x, y, z in zip(zdirs, xs, ys, zs):
label = '(%d, %d, %d), dir=%s' % (x, y, z, zdir)
ax.text(x, y, z, label, zdir)
ax.text(1, 1, 1, "red", color='red')
ax.text2D(0.05, 0.95, "2D Text", transform=ax.transAxes)
ax.set_xlim3d(0, 10)
ax.set_ylim3d(0, 10)
ax.set_zlim3d(0, 10)
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
@image_comparison(baseline_images=['trisurf3d'], remove_text=True)
def test_trisurf3d():
n_angles = 36
n_radii = 8
radii = np.linspace(0.125, 1.0, n_radii)
angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += np.pi/n_angles
x = np.append(0, (radii*np.cos(angles)).flatten())
y = np.append(0, (radii*np.sin(angles)).flatten())
z = np.sin(-x*y)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2)
@image_comparison(baseline_images=['wireframe3d'], remove_text=True)
def test_wireframe3d():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
@image_comparison(baseline_images=['quiver3d'], remove_text=True)
def test_quiver3d():
fig = plt.figure()
ax = fig.gca(projection='3d')
x, y, z = np.ogrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j]
u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
np.sin(np.pi * z))
ax.quiver(x, y, z, u, v, w, length=0.1)
@image_comparison(baseline_images=['quiver3d_empty'], remove_text=True)
def test_quiver3d_empty():
fig = plt.figure()
ax = fig.gca(projection='3d')
x, y, z = np.ogrid[-1:0.8:0j, -1:0.8:0j, -1:0.6:0j]
u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
np.sin(np.pi * z))
ax.quiver(x, y, z, u, v, w, length=0.1)
@image_comparison(baseline_images=['quiver3d_masked'], remove_text=True)
def test_quiver3d_masked():
fig = plt.figure()
ax = fig.gca(projection='3d')
# Using mgrid here instead of ogrid because masked_where doesn't
# seem to like broadcasting very much...
x, y, z = np.mgrid[-1:0.8:10j, -1:0.8:10j, -1:0.6:3j]
u = np.sin(np.pi * x) * np.cos(np.pi * y) * np.cos(np.pi * z)
v = -np.cos(np.pi * x) * np.sin(np.pi * y) * np.cos(np.pi * z)
w = (np.sqrt(2.0 / 3.0) * np.cos(np.pi * x) * np.cos(np.pi * y) *
np.sin(np.pi * z))
u = np.ma.masked_where((-0.4 < x) & (x < 0.1), u, copy=False)
v = np.ma.masked_where((0.1 < y) & (y < 0.7), v, copy=False)
ax.quiver(x, y, z, u, v, w, length=0.1)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| lgpl-3.0 |
kiliakis/BLonD-minimal-cpp | python/plot_llrf.py | 2 | 15063 |
# Copyright 2016 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
**Module to plot different LLRF features**
:Authors: **Helga Timko**, **Danilo Quartullo**
'''
from __future__ import division
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from pylab import cm
import numpy as np
import os
def plot_noise_spectrum(frequency, spectrum, sampling=1, dirname='fig',
figno=0):
"""
Plot of the phase noise spectrum.
    For large amounts of data, use "sampling" to plot a fraction of the data.
"""
# Plot
fig = plt.figure(1)
fig.set_size_inches(8, 6)
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
ax.set_xlim([0, 300])
ax.plot(frequency[::sampling], spectrum[::sampling])
ax.set_xlabel("Frequency [Hz]")
params = {'text.usetex': False, 'mathtext.default': 'sf'}
plt.rcParams.update(params)
ax.set_ylabel(r"Noise spectrum [$\frac{rad^2}{Hz}$]")
ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
# Save figure
if not os.path.exists(dirname):
os.makedirs(dirname)
fign = dirname + '/noise_spectrum_' "%d" % figno + '.png'
plt.savefig(fign)
plt.clf()
def plot_phase_noise(time, dphi, sampling=1, dirname='fig', figno=0):
"""
Plot of phase noise as a function of time.
    For large amounts of data, use "sampling" to plot a fraction of the data.
"""
# Plot
fig = plt.figure(1)
fig.set_size_inches(8, 6)
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
ax.plot(time[::sampling], dphi[::sampling])
ax.set_xlabel("Time [s]")
ax.set_ylabel(r"Phase noise [rad]")
# Save figure
if not os.path.exists(dirname):
os.makedirs(dirname)
fign = dirname + '/phase_noise_' "%d" % figno + '.png'
plt.savefig(fign)
plt.clf()
def plot_PL_bunch_phase(time_step, PL_bunch_phase, output_freq=1,
dirname='fig'):
"""
Plot of bunch phase measured by the Phase Loop as a function of time.
    For large amounts of data, monitor with larger 'output_freq'.
"""
# Time step of plotting
# time_step = RFSectionParameters.counter[0]
# Load/create data
if output_freq < 1:
output_freq = 1
ndata = int(time_step/output_freq)
t = output_freq*np.arange(ndata)
dphi = np.array(PL_bunch_phase[0:ndata], dtype=np.double)
dphi[time_step:] = np.nan
# Plot
fig = plt.figure(1)
fig.set_size_inches(8, 6)
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
ax.plot(t, dphi, '.')
ax.set_xlabel(r"No. turns [T$_0$]")
ax.set_ylabel(r"PL $\phi_{\mathsf{bunch}}$ [rad]")
ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
if time_step > 100000:
ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
# Save figure
if not os.path.exists(dirname):
os.makedirs(dirname)
fign = dirname + '/PL_bunch_phase.png'
plt.savefig(fign)
plt.clf()
def plot_PL_RF_phase(time_step, PL_phiRF, output_freq=1,
dirname='fig'):
"""
Plot of RF phase; monitored with Phase Loop.
    For large amounts of data, monitor with larger 'output_freq'.
"""
# Time step of plotting
# time_step = RFSectionParameters.counter[0]
# Load/create data
if output_freq < 1:
output_freq = 1
ndata = int(time_step/output_freq)
t = output_freq*np.arange(ndata)
dphi = np.array(PL_phiRF[0:ndata], dtype=np.double)
dphi[time_step:] = np.nan
# Plot
plt.figure(1, figsize=(8, 6))
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
ax.plot(t, dphi, '.')
ax.set_xlabel(r"No. turns [T$_0$]")
ax.set_ylabel(r"RF phase $\phi_{\mathsf{RF}}$ [rad]")
ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
if time_step > 100000:
ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
# Save figure
if not os.path.exists(dirname):
os.makedirs(dirname)
fign = dirname + '/RF_phase.png'
plt.savefig(fign)
plt.clf()
def plot_PL_phase_corr(time_step, PL_phase_corr, output_freq=1,
dirname='fig'):
"""
Plot of phase correction applied by the Phase Loop as a function of time.
    For large amounts of data, monitor with larger 'output_freq'.
"""
# Time step of plotting
# time_step = RFSectionParameters.counter[0]
# Load/create data
if output_freq < 1:
output_freq = 1
ndata = int(time_step/output_freq)
t = output_freq*np.arange(ndata)
dphi = np.array(PL_phase_corr[0:ndata], dtype=np.double)
dphi[time_step:] = np.nan
# Plot
fig = plt.figure(1)
fig.set_size_inches(8, 6)
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
ax.plot(t, dphi, '.')
ax.set_xlabel(r"No. turns [T$_0$]")
ax.set_ylabel(r"PL $\phi$ correction [rad]")
ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
if time_step > 100000:
ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
# Save figure
if not os.path.exists(dirname):
os.makedirs(dirname)
fign = dirname + '/PL_phase_corr.png'
plt.savefig(fign)
plt.clf()
def plot_PL_RF_freq(time_step, PL_omegaRF, output_freq=1,
dirname='fig'):
"""
Plot of RF revolution frequency; monitored with Phase Loop.
    For large amounts of data, monitor with larger 'output_freq'.
"""
# Time step of plotting
# time_step = RFSectionParameters.counter[0]
# Load/create data
if output_freq < 1:
output_freq = 1
ndata = int(time_step/output_freq)
t = output_freq*np.arange(ndata)
dphi = np.array(PL_omegaRF[0:ndata], dtype=np.double)
dphi[time_step:] = np.nan
# Plot
plt.figure(1, figsize=(8, 6))
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
ax.plot(t, dphi, '.')
ax.set_xlabel(r"No. turns [T$_0$]")
ax.set_ylabel(r"RF revolution frequency $\omega_{\mathsf{RF}}$ [1/s]")
ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
if time_step > 100000:
ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
# Save figure
if not os.path.exists(dirname):
os.makedirs(dirname)
fign = dirname + '/RF_freq.png'
plt.savefig(fign)
plt.clf()
def plot_PL_freq_corr(time_step, PL_omegaRF_corr, output_freq=1,
dirname='fig'):
"""
Plot of frequency correction applied by the Phase Loop as a function of time.
    For large amounts of data, monitor with larger 'output_freq'.
"""
# Time step of plotting
# time_step = RFSectionParameters.counter[0]
# Load/create data
if output_freq < 1:
output_freq = 1
ndata = int(time_step/output_freq)
t = output_freq*np.arange(ndata)
dphi = np.array(PL_omegaRF_corr[0:ndata], dtype=np.double)
dphi[time_step:] = np.nan
# Plot
fig = plt.figure(1)
fig.set_size_inches(8, 6)
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
ax.plot(t, dphi, '.')
ax.set_xlabel(r"No. turns [T$_0$]")
ax.set_ylabel(r"PL $\omega_{\mathsf{RF}}$ correction [1/s]")
ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
if time_step > 100000:
ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
# Save figure
if not os.path.exists(dirname):
os.makedirs(dirname)
fign = dirname + '/PL_freq_corr.png'
plt.savefig(fign)
plt.clf()
def plot_RF_phase_error(time_step, SL_dphiRF, output_freq=1,
dirname='fig'):
"""
Plot of accumulated RF phase error; the Synchro Loop can act on this.
    For large amounts of data, monitor with larger 'output_freq'.
"""
# Time step of plotting
# time_step = RFSectionParameters.counter[0]
# Load/create data
if output_freq < 1:
output_freq = 1
ndata = int(time_step/output_freq)
t = output_freq*np.arange(ndata)
dphi = np.array(SL_dphiRF[0:ndata], dtype=np.double)
dphi[time_step:] = np.nan
# Plot
plt.figure(1, figsize=(8, 6))
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
ax.plot(t, dphi, '.')
ax.set_xlabel(r"No. turns [T$_0$]")
ax.set_ylabel(r"RF phase error $\Delta \phi_{\mathsf{RF}}$ [rad]")
ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
if time_step > 100000:
ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
# Save figure
if not os.path.exists(dirname):
os.makedirs(dirname)
fign = dirname + '/RF_phase_error.png'
plt.savefig(fign)
plt.clf()
def plot_RL_radial_error(time_step, RL_drho, output_freq=1,
dirname='fig'):
"""
Plot of relative radial error; monitored with Phase Loop.
    For large amounts of data, monitor with larger 'output_freq'.
"""
# Time step of plotting
# time_step = RFSectionParameters.counter[0]
# Load/create data
if output_freq < 1:
output_freq = 1
ndata = int(time_step/output_freq)
t = output_freq*np.arange(ndata)
dphi = np.array(RL_drho[0:ndata], dtype=np.double)
dphi[time_step:] = np.nan
# Plot
plt.figure(1, figsize=(8, 6))
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
ax.plot(t, dphi, '.')
ax.set_xlabel(r"No. turns [T$_0$]")
ax.set_ylabel(r"Relative radial error [1]")
ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
if time_step > 100000:
ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
# Save figure
if not os.path.exists(dirname):
os.makedirs(dirname)
fign = dirname + '/RL_radial_error.png'
plt.savefig(fign)
plt.clf()
def plot_COM_motion(time_step, mean_dt, mean_dE, output_freq=1,
dirname='fig'):
"""
Evolution of bunch C.O.M. in longitudinal phase space.
Optional use of histograms and separatrix.
"""
# Time step of plotting
# time_step = RFSectionParameters.counter[0]
# Load/create data
if output_freq < 1:
output_freq = 1
ndata = int(time_step/output_freq)
# Load data
mean_dt = np.array(mean_dt[0:ndata], dtype=np.double)
mean_dE = np.array(mean_dE[0:ndata], dtype=np.double)
# Plot
fig = plt.figure(1)
fig.set_size_inches(8, 8)
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
ax.scatter(mean_dt, mean_dE, s=5, edgecolor='none')
ax.set_xlabel(r"$\Delta t$ [s]")
ax.set_ylabel(r"$\Delta$E [eV]")
ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
ax.set_xlim((-0.7e-6, 0.7e-6))
plt.figtext(0.95, 0.95, 'C.O.M. evolution', fontsize=16, ha='right',
va='center')
# Save plot
if not os.path.exists(dirname):
os.makedirs(dirname)
fign = dirname + '/COM_evolution.png'
plt.savefig(fign)
plt.clf()
def plot_LHCNoiseFB(time_step, LHC_noise_FB_factor, output_freq=1,
dirname='fig'):
"""
Plot of the phase noise multiplication factor as a function of time.
    For large amounts of data, monitor with larger 'output_freq'.
"""
# Time step of plotting
# time_step = RFSectionParameters.counter[0]
# Load/create data
if output_freq < 1:
output_freq = 1
ndata = int(time_step/output_freq)
t = output_freq*np.arange(ndata)
x = np.array(LHC_noise_FB_factor[0:ndata], dtype=np.double)
x[time_step:] = np.nan
# Plot
fig = plt.figure(1)
fig.set_size_inches(8, 6)
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
ax.plot(t, x, '.')
ax.set_xlabel(r"No. turns [T$_0$]")
ax.set_ylabel(r"LHC noise FB scaling factor [1]")
ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
if time_step > 100000:
ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
# Save figure
if not os.path.exists(dirname):
os.makedirs(dirname)
fign = dirname + '/LHC_noise_FB.png'
plt.savefig(fign)
plt.clf()
def plot_LHCNoiseFB_FWHM(time_step, LHC_noise_FB_bl,
output_freq=1, dirname='fig'):
"""
Plot of the FWHM bunch length used in LHCNoiseFB as a function of time.
    For large amounts of data, monitor with larger 'output_freq'.
"""
# Time step of plotting
# time_step = RFSectionParameters.counter[0]
# Load/create data
if output_freq < 1:
output_freq = 1
ndata = int(time_step/output_freq)
t = output_freq*np.arange(ndata)
x = np.array(LHC_noise_FB_bl[0:ndata], dtype=np.double)
x[time_step:] = np.nan
# Plot
fig = plt.figure(1)
fig.set_size_inches(8, 6)
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
ax.plot(t, x, '.')
ax.set_xlabel(r"No. turns [T$_0$]")
ax.set_ylabel(r"4-sigma FWHM bunch length [s]")
if time_step > 100000:
ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
# Save figure
if not os.path.exists(dirname):
os.makedirs(dirname)
fign = dirname + '/LHC_noise_FB_bl.png'
plt.savefig(fign)
plt.clf()
def plot_LHCNoiseFB_FWHM_bbb(time_step, LHC_noise_FB_bl_bbb,
output_freq=1, dirname='fig'):
"""
Plot of bunch-by-bunch FWHM bunch length used in LHCNoiseFB as a function
    of time. For large amounts of data, monitor with larger 'output_freq'.
"""
# Time step of plotting
# time_step = RFSectionParameters.counter[0]
# Load/create data
if output_freq < 1:
output_freq = 1
ndata = int(time_step/output_freq)
t = output_freq*np.arange(ndata)
x = np.array(LHC_noise_FB_bl_bbb[0:ndata, :], ndmin=2)
x[time_step:, :] = np.nan
nbunches = x.shape[1]
# Plot
fig = plt.figure(1)
fig.set_size_inches(8, 6)
ax = plt.axes([0.15, 0.1, 0.8, 0.8])
for i in range(nbunches):
ax.plot(t, x[:, i], '.', color=cm.get_cmap(
'jet')(i/nbunches), label="Bunch %d" % i)
ax.set_xlabel(r"No. turns [T$_0$]")
ax.set_ylabel(r"4-sigma FWHM bunch length [s]")
if time_step > 100000:
ax.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
ax.legend()
# Save figure
if not os.path.exists(dirname):
os.makedirs(dirname)
fign = dirname + '/LHC_noise_FB_bl_bbb.png'
plt.savefig(fign)
plt.clf()
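if __name__ == '__main__':
    # Illustrative smoke test only (not part of the BLonD package): call one
    # of the plotting routines above with made-up monitor data to check that
    # the figure pipeline runs headless. The values below are arbitrary.
    n_turns = 1000
    fake_phase = 0.1 * np.sin(2 * np.pi * np.arange(n_turns) / 200.0)
    plot_PL_bunch_phase(n_turns, fake_phase, output_freq=10,
                        dirname='fig_demo')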
| gpl-3.0 |
bryan-lunt/py_pointcloud_reader | MeshDeformAndView.py | 2 | 1725 | # coding: utf-8
import sys
sys.path.append("./src/")
from pypointcloud import *
from spline_tools import ParametricSpline
import scipy as S
results = None
with open("./D_mel_wt__atlas_r2.vpc") as infile:
results = read_vpc(infile)
d = results[1]
x = d[:,0]
y = d[:,1]
z = d[:,2]
ap_line = None
x_min = x.min()
x_max = x.max()
z_min = z.min()
z_max = z.max()
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.widgets import Button
N_SPLINES = 9
points_per_spline = 5
from scipy.interpolate import griddata
mesh_x = S.loadtxt("mesh_x.txt")
mesh_y = S.loadtxt("mesh_y.txt")
the_splines = list()
for i in range(mesh_x.shape[0]):
the_splines.append(ParametricSpline(mesh_x[i],mesh_y[i]))
SAMPLE_NUMBER = 100
ts = S.linspace(0.0,1.0,SAMPLE_NUMBER)
old_xy = S.vstack([aspline(ts) for aspline in the_splines])
new_xy = S.vstack([S.hstack([i*S.ones((SAMPLE_NUMBER,1)), ts.reshape(-1,1)]) for i in range(len(the_splines))])
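# Descriptive note (added): old_xy holds the sampled points along each spline
# in the original coordinate plane (matched against the cloud's x and z
# columns below), while new_xy holds the target coordinates of those same
# samples in the straightened frame (column 0 = spline index, column 1 =
# parameter t along the spline). griddata interpolates this old->new mapping
# at every nucleus position (x, z), effectively unwarping the point cloud
# onto a regular mesh.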
new_xs = griddata(old_xy, new_xy[:,0], (x, z), method='linear')
new_ys = griddata(old_xy, new_xy[:,1], (x, z), method='linear')
disp_genes = ["kni__3","D__3","hbP__3","bcdP__3","KrP__3","gt__3","eve__3","odd__3","rho__3","sna__3"]
#disp_genes = ["eve__3"]
for one_gene_name in disp_genes:
colnum = results[0]["column"].index(one_gene_name)-1
colors = S.vstack([d[:,colnum],S.zeros(d.shape[0]),S.zeros(d.shape[0])]).T
colors -= colors.min()
colors*=S.power(colors.max(),-1.0)
fig = plt.figure(figsize=(4,2))
ax = fig.add_subplot(1,1,1)
ax.set_title(one_gene_name)
ax.scatter(new_xs,new_ys,s=45.0,c=colors,alpha=0.75)
for i in range(8):
ax.axvline(float(i),c="g")
fig.tight_layout()
plt.show()
| gpl-3.0 |
rrogge/supersid | supersid/wxsidviewer.py | 2 | 8399 | """
wxSidViewer class implements a graphical user interface for SID based on wxPython
About Threads and wxPython http://www.blog.pythonlibrary.org/2010/05/22/wxpython-and-threads/
Each Viewer must implement:
- __init__(): all initializations
- run(): main loop to get user input
- close(): cleaning up
- status_display(): display a message in a status bar or equivalent
"""
from __future__ import print_function
import matplotlib
#matplotlib.use('WXAgg') # select back-end before pylab
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.figure import Figure
import wx
from wx.lib.pubsub import Publisher
import supersid_plot as SSP
from config import FILTERED, RAW
class wxSidViewer(wx.Frame):
'''
Frame, Menu, Panel, BoxSizer are wx things and FigureCanvas, Figure, Axes are MPL things
Viewer =>> Panel =>> FigureCanvas =>> Figure => Axes
frame close events are forwarded to SuperSID class
'''
def __init__(self, controller):
"""SuperSID Viewer using wxPython GUI for standalone and client.
Creation of the Frame with menu and graph display using matplotlib
"""
matplotlib.use('WXAgg') # select back-end before pylab
        # the application MUST be created first
self.app = wx.App(redirect=False)
#
self.version = "1.3.1 20150421 (wx)"
self.controller = controller # previously referred as 'parent'
# Frame
wx.Frame.__init__(self, None, -1, "supersid @ " + self.controller.config['site_name'], pos = (20, 20), size=(1000,400))
self.Bind(wx.EVT_CLOSE, self.on_close)
# Icon
try:
self.SetIcon(wx.Icon("supersid_icon.png", wx.BITMAP_TYPE_PNG))
        except Exception:
            # the icon file is optional; ignore it if it cannot be loaded
            pass
# All Menus creation
menu_item_file = wx.Menu()
save_buffers_menu = menu_item_file.Append(wx.NewId(), '&Save Raw Buffers\tCtrl+B', 'Save Raw Buffers')
save_filtered_menu = menu_item_file.Append(wx.NewId(),'&Save Filtered Buffers\tCtrl+F', 'Save Filtered Buffers')
exit_menu = menu_item_file.Append(wx.NewId(), '&Quit\tCtrl+Q', 'Quit Super SID')
menu_item_plot = wx.Menu()
plot_menu = menu_item_plot.Append(wx.NewId(), '&Plot\tCtrl+P', 'Plot data')
menu_item_help = wx.Menu()
about_menu = menu_item_help.Append(wx.NewId(), '&About', 'About Super SID')
menubar = wx.MenuBar()
menubar.Append(menu_item_file, '&File')
menubar.Append(menu_item_plot, '&Plot')
menubar.Append(menu_item_help, '&Help')
self.SetMenuBar(menubar)
self.Bind(wx.EVT_MENU, self.on_save_buffers, save_buffers_menu)
self.Bind(wx.EVT_MENU, self.on_save_filtered, save_filtered_menu)
self.Bind(wx.EVT_MENU, self.on_plot, plot_menu)
self.Bind(wx.EVT_MENU, self.on_about, about_menu)
self.Bind(wx.EVT_MENU, self.on_exit, exit_menu)
# Frame
psd_panel = wx.Panel(self, -1)
psd_sizer = wx.BoxSizer(wx.VERTICAL)
psd_panel.SetSizer(psd_sizer)
# FigureCanvas
psd_figure = Figure(facecolor='beige') # 'bisque' 'antiquewhite' 'FFE4C4' 'F5F5DC' 'grey'
self.canvas = FigureCanvas(psd_panel, -1, psd_figure)
self.canvas.mpl_connect('button_press_event', self.on_click) # MPL call back
psd_sizer.Add(self.canvas, 1, wx.EXPAND)
self.axes = psd_figure.add_subplot(111)
self.axes.hold(False)
# StatusBar
self.status_bar = self.CreateStatusBar()
self.status_bar.SetFieldsCount(2)
# Default View
self.SetMinSize((600,600))
psd_sizer.SetItemMinSize(psd_panel,1000,600)
self.Center(True)
self.Show()
# create a pubsub receiver for refresh after data capture / ref. link on threads
Publisher().subscribe(self.updateDisplay, "update")
def run(self):
"""Main loop for the application"""
self.app.MainLoop()
def updateDisplay(self, msg):
"""
Receives data from thread and updates the display (graph and statusbar)
"""
try:
self.canvas.draw()
self.status_display(msg.data)
except:
pass
def get_axes(self):
return self.axes
def status_display(self, message, level=0, field=0):
if level == 1:
wx.CallAfter(self.status_display, message)
elif level == 2:
wx.CallAfter(Publisher().sendMessage, "update", message)
else:
self.status_bar.SetStatusText(message,field)
def on_close(self, event):
"""Requested to close by the user"""
self.controller.on_close()
def close(self):
"""Requested to close by the controller"""
self.app.Exit()
self.Destroy()
def on_exit(self, event):
self.status_display("This is supersid signing off...")
dlg = wx.MessageDialog(self,
            'Are you sure you want to quit supersid?', 'Please Confirm',
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
if dlg.ShowModal() == wx.ID_YES:
self.Close(True)
def on_plot(self, event):
"""Save current buffers (raw) and display the data using supersid_plot.
Using a separate process to prevent interference with data capture"""
filenames = self.controller.save_current_buffers(log_format = 'supersid_format')
print("plotting", filenames)
SSP.do_main(filenames)
def on_plot_files(self, event):
"""Select multiple files and call the supersid_plot module for display"""
filedialog = wx.FileDialog(self, message = 'Choose files to plot',
defaultDir = self.controller.config.data_path,
defaultFile = '',
wildcard = 'Supported filetypes (*.csv) |*.csv',
style = wx.OPEN |wx.FD_MULTIPLE)
if filedialog.ShowModal() == wx.ID_OK:
filelist = ""
for u_filename in filedialog.GetFilenames():
filelist = str(filelist + "../Data/" + str(u_filename) + ",")
filelist = filelist.rstrip(',') # remove last comma
ssp = SSP.SUPERSID_PLOT()
ssp.plot_filelist(filelist)
def on_save_buffers(self, event):
"""Call the Controller for writing unfiltered/raw data to file"""
self.controller.save_current_buffers(log_type=RAW)
def on_save_filtered(self, event):
"""Call the Controller for writing filtered data to file"""
self.controller.save_current_buffers('current_filtered.csv', FILTERED)
def on_about(self, event):
"""Open an About message box"""
info = wx.AboutDialogInfo()
info.SetIcon(wx.Icon('supersid_icon.png', wx.BITMAP_TYPE_PNG))
info.SetName('SuperSID')
info.SetDescription(self.controller.about_app())
info.SetCopyright('(c) Stanford Solar Center and Eric Gibert')
wx.AboutBox(info)
def on_click(self, event): # MLP mouse event
"""Following user click on the graph, display associated information in statusbar"""
if event.inaxes:
strength = pow(10, (event.ydata/10.0))
message = "frequency=%.0f " % event.xdata + " power=%.3f " % event.ydata + " strength=%.0f" % strength
self.status_display(message, field = 1)
def display_message(self, message="message...", sender="SuperSID"):
"""For any need to display a MessageBox - to review for better button/choice management"""
status = wx.MessageBox(message,
sender,
wx.CANCEL | wx.ICON_QUESTION)
if status == wx.YES:
return 1 #RETRY
elif status == wx.NO:
return 1 #SKIP
elif status == wx.CANCEL:
return 1 #STOP
else:
return 0
def get_psd(self, data, NFFT, FS):
"""By calling 'psd' within axes, it both calculates and plots the spectrum"""
try:
Pxx, freqs = self.axes.psd(data, NFFT = NFFT, Fs = FS)
except wx.PyDeadObjectError:
exit(3)
return Pxx, freqs
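# --- illustrative sketch (added, not part of supersid) ----------------------
# Minimal example of the Viewer contract documented in the module docstring
# (__init__ / run / close / status_display), with no GUI event loop. It only
# shows the required interface and makes no claim to match supersid's own
# text-mode viewer.
class ConsoleViewerSketch(object):
    def __init__(self, controller):
        self.controller = controller
    def run(self):
        # no event loop; simply report that the viewer is up and return
        self.status_display("ConsoleViewerSketch ready (no event loop)")
    def close(self):
        pass  # nothing to clean up for a console viewer
    def status_display(self, message, level=0, field=0):
        print(message)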
| mit |
fabianp/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
rmsare/scarplet | scarplet/tests/test_dem.py | 2 | 1813 | import os
import sys
import filecmp
import numpy as np
import unittest
import matplotlib
import matplotlib.pyplot as plt
from context import scarplet
from scarplet import dem
TEST_DIR = os.path.dirname(__file__)
class CalculationMethodsTestCase(unittest.TestCase):
def setUp(self):
self.dem = dem.DEMGrid(os.path.join(TEST_DIR, 'data/faultzone.tif'))
def test_calculate_slope(self):
sx, sy = self.dem._calculate_slope()
true_sx, true_sy = np.load(os.path.join(TEST_DIR, 'results/faultzone_sxsy.npy'))
self.assertTrue(np.allclose(sx, true_sx), "Slope (x direction) incorrect")
self.assertTrue(np.allclose(sy, true_sy), "Slope (y direction) incorrect")
def test_calculate_laplacian(self):
del2z = self.dem._calculate_laplacian()
true_del2z = np.load(os.path.join(TEST_DIR, 'results/faultzone_del2z.npy'))
self.assertTrue(np.allclose(del2z, true_del2z), "Laplacian incorrect (y axis direction)")
def test_calculate_directional_laplacian(self):
alphas = [-np.pi / 2, -np.pi / 4, np.pi / 4, np.pi / 2]
for alpha in alphas:
del2z = self.dem._calculate_directional_laplacian(alpha)
alpha *= 180 / np.pi
true_del2z = np.load(os.path.join(TEST_DIR, 'results/faultzone_del2z_{:.0f}.npy'.format(alpha)))
            self.assertTrue(np.allclose(del2z, true_del2z), "Laplacian incorrect ({:.0f} deg)".format(alpha))
def test_pad_boundary(self):
dx = 5
dy = 5
grid = self.dem._griddata
padded_grid = np.pad(grid, pad_width=(dy, dx), mode='reflect')
self.dem._pad_boundary(dx, dy)
        self.assertTrue(np.array_equal(self.dem._griddata, padded_grid), "Grid padded incorrectly")
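if __name__ == '__main__':
    # Convenience entry point (added) so the tests can also be run directly
    # with the standard library runner, in addition to a test collector.
    unittest.main()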
| mit |
BrainIntensive/OnlineBrainIntensive | resources/HCP/ciftify/ciftify/bin/cifti_vis_PINT.py | 1 | 24283 | #!/usr/bin/env python
"""
Makes temporary seed corr maps using a chosen roi for each network and
correlation maps
Usage:
cifti_vis_PINT snaps [options] <func.dtseries.nii> <subject> <PINT_summary.csv>
cifti_vis_PINT index [options]
Arguments:
<func.dtseries.nii> A dtseries file to feed into
ciftify_PINT_vertices.py map
<subject> Subject ID for HCP surfaces
<PINT_summary.csv> The output csv (*_summary.csv) from the PINT
analysis step
Options:
--qcdir PATH Full path to location of QC directory
--hcp-data-dir PATH The directory for HCP subjects (overrides HCP_DATA
environment variable)
--subjects-filter STR A string that can be used to filter out subject
directories
--roi-radius MM Specify the radius [default: 6] of the plotted rois
(in mm)
-v,--verbose Verbose logging
--debug Debug logging in Erin's very verbose style
-n,--dry-run Dry run
--help Print help
DETAILS
This makes pretty pictures of your hcp views using connectome workbenches
"show scene" commands. It pastes the pretty pictures together into some .html
QC pages
There are two subfunctions:
snaps: will create all the pics as well as the subject-specific html view
for one subject. This option requires the cifti file of functional
timeseries, the hcp subject id (so that it can find the surface
information to plot on), and the *_summary.csv file that was the output
of find-PINT-vertices.
index: will make an index out of all the subjects in the qcdir
Note: this script requires the seaborn package to make the correlation
heatmaps...
Written by Erin W Dickie ([email protected]) Jun 20, 2016
"""
import os
import sys
import logging
import logging.config
from abc import ABCMeta
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context="paper", font="monospace")
import pandas as pd
import numpy as np
import nibabel as nib
from docopt import docopt
import ciftify
from ciftify.utilities import VisSettings, add_metaclass
DRYRUN = False
DEBUG = False
# Read logging.conf
config_path = os.path.join(os.path.dirname(__file__), "logging.conf")
logging.config.fileConfig(config_path, disable_existing_loggers=False)
logger = logging.getLogger(os.path.basename(__file__))
PINTnets = [{ 'NETWORK': 2, 'roiidx': 72, 'best_view': "CombinedView"},
{ 'NETWORK': 3, 'roiidx': 2, 'best_view': "CombinedView"},
{ 'NETWORK': 4, 'roiidx': 44, 'best_view': "dtLat"},
{ 'NETWORK': 5, 'roiidx': 62, 'best_view': "dtLat"},
{ 'NETWORK': 6, 'roiidx': 28, 'best_view': "dtLat"},
{ 'NETWORK': 7, 'roiidx': 14, 'best_view': "dtLat"}]
class UserSettings(VisSettings):
def __init__(self, arguments):
VisSettings.__init__(self, arguments, qc_mode='scrois')
        ## Hack to account for the fact that index doesn't expect these variables
if arguments['snaps']:
self.subject = arguments['<subject>']
self.func = self.__get_input_file(arguments['<func.dtseries.nii>'])
self.pint_summary = self.__get_input_file(
arguments['<PINT_summary.csv>'])
self.left_surface = self.__get_surface('L')
self.right_surface = self.__get_surface('R')
else:
self.subject = None
self.func = None
self.pint_summary = None
self.subject_filter = arguments['--subjects-filter']
self.roi_radius = arguments['--roi-radius']
def __get_surface(self, surface_type):
surface = os.path.join(self.hcp_dir, self.subject, 'MNINonLinear',
'fsaverage_LR32k',
'{}.{}.midthickness.32k_fs_LR.surf.gii'.format(self.subject,
surface_type))
return self.__get_input_file(surface)
def __get_input_file(self, file_path):
if not os.path.exists(file_path):
logger.critical("{} not found".format(file_path))
sys.exit(1)
return file_path
class FakeNifti(object):
def __init__(self, func_path, tmp_dir):
self.__func_fnifti = self.__make_fake_nifti(func_path, tmp_dir)
self.data, self.affine, self.header, \
self.dims = ciftify.utilities.loadnii(self.__func_fnifti)
self.template = self.__get_template(func_path, tmp_dir)
def __make_fake_nifti(self, func_path, tmp_dir):
nifti_path = os.path.join(tmp_dir, 'func.nii.gz')
command_list = ['wb_command', '-cifti-convert', '-to-nifti', func_path,
nifti_path]
docmd(command_list)
if not os.path.exists(nifti_path):
logger.critical("Failed to generate file critical file: {} failed "
"command: {}".format(nifti_path, " ".join(command_list)))
sys.exit(1)
return nifti_path
def __get_template(self, func_path, tmp_dir):
template_path = os.path.join(tmp_dir, 'template.dscalar.nii')
command_list = ['wb_command', '-cifti-reduce', func_path, 'MIN',
template_path]
docmd(command_list)
if not os.path.exists(template_path):
logger.critical("Failed to generate critical file: {} failed"
"command: {}".format(template_path, " ".format(
command_list)))
sys.exit(1)
return template_path
@add_metaclass(ABCMeta)
class PDDataframe(object):
dataframe = None
def make_dataframe(self, csv_path, header='infer'):
try:
data_frame = pd.read_csv(csv_path, header=header)
except:
logger.critical("Cannot make dataframe from file {}".format(
csv_path))
sys.exit(1)
return data_frame
class SummaryData(PDDataframe):
vertex_types = ['tvertex', 'ivertex']
def __init__(self, summary_csv):
self.dataframe = self.make_dataframe(summary_csv)
self.vertices = self.__make_vertices(summary_csv)
def __make_vertices(self, summary_csv):
vert_list = []
for vertex in self.vertex_types:
vert_list.append(Vertex(summary_csv, vertex))
return vert_list
class Vertex(PDDataframe):
def __init__(self, summary_csv, vert_type):
self.vert_type = vert_type
self.dataframe = self.__get_dataframe_type(summary_csv)
def __get_dataframe_type(self, csv_path):
new_path = csv_path.replace('_summary',
'_{}_meants'.format(self.vert_type))
data_frame = self.make_dataframe(new_path, header=None)
return data_frame.transpose()
def make_heat_map(self, summary_dataframe, output_dir):
vertex_corrpic = os.path.join(output_dir,
'{}_corrmat.png'.format(self.vert_type))
## Sets title to associate with this image
if self.vert_type == 'tvertex':
self.title = "Pre (tvertex)"
else:
self.title = "Post (ivertex)"
corrmat = self.dataframe.corr()
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(10, 8))
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=.9, square=True)
# Use matplotlib directly to emphasize known networks
for i in summary_dataframe.index:
if i and summary_dataframe.loc[i, 'NETWORK'] != \
summary_dataframe.loc[i-1, 'NETWORK']:
ax.axhline(len(summary_dataframe) - i, c="w", linewidth=3.0)
ax.axvline(i, c="w", linewidth=3.0)
f.tight_layout()
f.savefig(vertex_corrpic)
self.heat_map = vertex_corrpic
return vertex_corrpic
def make_rois(self, network_csv, network_df, left_surface, right_surface,
seed_radius, output_dir):
self.xrois = os.path.join(output_dir, 'xrois.dscalar.nii')
self.__generate_roi(self.vert_type, network_csv, seed_radius,
left_surface, right_surface, self.xrois)
if self.__needs_yrois(network_df):
self.yrois = os.path.join(output_dir, 'yrois.dscalar.nii')
self.__generate_roi('vertex_48', network_csv, seed_radius,
left_surface, right_surface, self.yrois)
else:
self.yrois = self.xrois
self.rois = self.__combine_rois_and_set_palette(output_dir)
def __needs_yrois(self, network_df):
if self.vert_type == 'tvertex':
return False
# if vertex_48 is in df, means failed to stop iterating
if 'vertex_48' not in network_df.columns:
return False
if network_df.loc[:,'dist_49'].sum() <= 0:
return False
return True
def __generate_roi(self, vert_type, network_csv, seed_radius, l_surface,
r_surface, output):
## make the overlaying ROIs
docmd(['ciftify_surface_rois', '--vertex-col', vert_type, network_csv,
str(seed_radius), l_surface, r_surface, output])
if not os.path.exists(output):
logger.error("Could not generate needed ROIs output file: "
"{}".format(output))
sys.exit(1)
return
def __combine_rois_and_set_palette(self, output_dir):
rois = os.path.join(output_dir, 'rois.dscalar.nii')
## combine xrois and yrois into one roi result
docmd(['wb_command -cifti-math "((x*2)+y)"', rois, '-var','x',
self.xrois, '-var', 'y', self.yrois])
## set the palette on the roi to power_surf (mostly grey)
docmd(['wb_command', '-cifti-palette', rois, 'MODE_AUTO_SCALE', rois,
'-palette-name', 'power_surf'])
if not os.path.exists(rois):
logger.error("Could not generate final ROI file: {}".format(rois))
sys.exit(1)
return rois
def make_seed_corr(self, summary_df, network, func_fnifti, temp_dir):
self.seed_corr = os.path.join(temp_dir, 'scorr{}{}.dscalar.nii'.format(
self.vert_type, network))
meants = self.dataframe.loc[:, summary_df.loc[:, 'NETWORK'] ==
network].mean(axis=1)
temp_nifti_seed = os.path.join(temp_dir, 'seedcorr{}.nii.gz'.format(
network))
## correlated the mean timeseries with the func data
out = np.zeros([func_fnifti.dims[0]*func_fnifti.dims[1]*func_fnifti.dims[2],
1])
for i in np.arange(func_fnifti.data.shape[0]):
out[i] = np.corrcoef(meants, func_fnifti.data[i, :])[0][1]
## reshape data and write it out to a fake nifti file
out = out.reshape([func_fnifti.dims[0], func_fnifti.dims[1],
func_fnifti.dims[2], 1])
out = nib.nifti1.Nifti1Image(out, func_fnifti.affine)
out.to_filename(temp_nifti_seed)
## convert back
docmd(['wb_command','-cifti-convert','-from-nifti',
temp_nifti_seed, func_fnifti.template, self.seed_corr])
docmd(['wb_command', '-cifti-palette', self.seed_corr,
'MODE_AUTO_SCALE_PERCENTAGE', self.seed_corr,
'-palette-name', 'PSYCH-NO-NONE'])
if not os.path.exists(self.seed_corr):
logger.error("Could not generate seed corr file {} for {}"
"".format(self.seed_corr, self.vert_type))
sys.exit(1)
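# Illustrative sketch (added, not used by the pipeline): the same voxel-wise
# seed correlation as in Vertex.make_seed_corr, computed with vectorised
# numpy instead of a per-voxel np.corrcoef loop. 'func_data' is assumed to be
# an (n_voxels, n_timepoints) array and 'meants' a 1-D timeseries.
def _seed_corr_vectorised(func_data, meants):
    func_c = func_data - func_data.mean(axis=1, keepdims=True)
    seed_c = np.asarray(meants) - np.mean(meants)
    denom = np.linalg.norm(func_c, axis=1) * np.linalg.norm(seed_c)
    with np.errstate(divide='ignore', invalid='ignore'):
        # voxels with zero variance come out as nan, mirroring np.corrcoef
        return func_c.dot(seed_c) / denom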
def main():
global DEBUG
global DRYRUN
arguments = docopt(__doc__)
snaps = arguments['snaps']
index = arguments['index']
verbose = arguments['--verbose']
DEBUG = arguments['--debug']
DRYRUN = arguments['--dry-run']
if verbose:
logger.setLevel(logging.INFO)
# Also set level for all loggers in ciftify module (or else will be
# logging.WARN by default)
logging.getLogger('ciftify').setLevel(logging.INFO)
if DEBUG:
logger.setLevel(logging.DEBUG)
logging.getLogger('ciftify').setLevel(logging.DEBUG)
logger.info(arguments)
settings = UserSettings(arguments)
qc_config = ciftify.qc_config.Config(settings.qc_mode)
## make pics and qcpage for each subject
if snaps:
with ciftify.utilities.TempSceneDir(settings.hcp_dir) as scene_dir:
with ciftify.utilities.TempDir() as temp_dir:
logger.debug('Created tempdir {} on host {}'.format(scene_dir,
os.uname()[1]))
logger.info("Making snaps for subject: {}".format(
settings.subject))
ret = run_snaps(settings, qc_config, scene_dir, temp_dir)
return ret
# Start the index html file
if index:
logger.info("Writing Index pages to: {}".format(settings.qc_dir))
ret = write_all_index_pages(settings, qc_config)
return ret
def run_snaps(settings, qc_config, scene_dir, temp_dir):
'''
Do all the qc stuff for the one subject.
'''
qc_subdir = os.path.join(settings.qc_dir, settings.subject)
if os.path.exists(qc_subdir):
logger.info('QC for subject {} already exists... exiting'.format(
settings.subject))
return 0
ciftify.utilities.make_dir(qc_subdir, dry_run=DRYRUN)
func_nifti = FakeNifti(settings.func, temp_dir)
summary_data = SummaryData(settings.pint_summary)
qc_sub_html = os.path.join(qc_subdir, 'qc_sub.html')
with open(qc_sub_html,'w') as qc_sub_page:
write_header_and_navbar(qc_sub_page, settings.subject, PINTnets,
title="{} PINT results".format(settings.subject), path='../')
qc_sub_page.write('<h1> {} PINT results</h1>\n'.format(settings.subject))
write_heat_maps(qc_sub_page, qc_subdir, summary_data)
for pint_dict in PINTnets:
# for each seed vertex make an roi and generate a seed map
## get info from the seed_dict
roiidx = pint_dict['roiidx']
network = pint_dict['NETWORK']
## make a dscalar of the network map
network_csv = os.path.join(temp_dir, 'networkdf.csv')
networkdf = summary_data.dataframe.loc[
summary_data.dataframe.loc[:,'NETWORK'] == network,:]
networkdf.to_csv(network_csv, index=False)
qc_sub_page.write('<div class="container" style="width: 100%;">\n')
qc_sub_page.write(' <h2>Network {}</h2>\n'.format(network))
for vertex in summary_data.vertices:
                logger.info('Running {} {} snaps:'.format(network,
                    vertex.vert_type))
vertex.make_rois(network_csv, networkdf,
settings.left_surface, settings.right_surface,
settings.roi_radius, temp_dir)
vertex.make_seed_corr(summary_data.dataframe, network,
func_nifti, temp_dir)
scene_file = personalize_template(qc_config, settings,
scene_dir, network, vertex)
qc_html = os.path.join(qc_subdir, 'qc_{}{}.html'.format(
vertex.vert_type, network))
with open(qc_html, 'w') as qc_page:
write_subject_page(qc_config, qc_page, scene_file,
settings.subject, qc_subdir, vertex, network)
fav_pic = '{}{}_{}.png'.format(vertex.vert_type, network,
pint_dict['best_view'])
ciftify.html.write_image(qc_sub_page, 12,
os.path.basename(qc_page.name), fav_pic,
"Network {} {}".format(network, vertex.vert_type))
## add a div around the subject page container
qc_sub_page.write('</div>\n')
def write_subjects_page_header(qc_sub_page, subject, network_dict):
qc_sub_page.write('<!DOCTYPE html>\n<HTML><TITLE> {} PINT results'
'</TITLE>\n'.format(subject))
ciftify.html.write_header(qc_sub_page)
qc_sub_page.write('<body>\n')
write_navigation_bar(network_dict)
def write_header_and_navbar(html_page, page_subject, PINTnets,
title='PINT results', path='', active_link=None):
html_page.write('<!DOCTYPE html>\n<HTML><TITLE>{}</TITLE>\n'.format(title))
ciftify.html.write_header(html_page)
html_page.write('<body>\n')
nav_list = [{'href': '', 'label': 'Network:'}]
for pint_dict in PINTnets:
network_page = os.path.join(path, "network_{}.html".format(
pint_dict['NETWORK']))
nav_list.append({'href': network_page,
'label': pint_dict['NETWORK']})
corrmat_page = os.path.join(path, "corrmats.html")
nav_list.append({'href': corrmat_page, 'label':'Correlation Matrixes'})
index_page = os.path.join(path, "index.html")
nav_list.append({'href': index_page, 'label':'Index'})
ciftify.html.write_navbar(html_page, page_subject, nav_list,
activelink=active_link)
def write_heat_maps(qc_page, qc_dir, summary_data):
qc_page.write('<div class="container" style="width: 100%;">')
qc_parent_dir = os.path.dirname(qc_page.name)
for vertex in summary_data.vertices:
heat_map = vertex.make_heat_map(summary_data.dataframe, qc_dir)
map_relpath = os.path.relpath(heat_map, qc_parent_dir)
ciftify.html.write_image(qc_page, 6, map_relpath, map_relpath,
vertex.title)
qc_page.write('</div>\n')
def personalize_template(qc_config, settings, scene_dir, network, vertex):
with open(qc_config.template, 'r') as template_text:
template_contents = template_text.read()
if not template_contents:
logger.error("{} cannot be read or is empty".format(qc_config.template))
sys.exit(1)
scene_file = os.path.join(scene_dir, 'seedcorr_{}_{}{}.scene'.format(
settings.subject, network, vertex.vert_type))
with open(scene_file, 'w') as scene_stream:
scene_text = modify_template_contents(template_contents, scene_file,
settings, vertex)
scene_stream.write(scene_text)
return scene_file
def modify_template_contents(template_contents, scene_file, settings, vertex):
modified_text = template_contents.replace('HCP_DATA_PATH', settings.hcp_dir)
modified_text = modified_text.replace('HCP_DATA_RELPATH', os.path.relpath(
settings.hcp_dir, os.path.dirname(scene_file)))
modified_text = modified_text.replace('SUBJID', settings.subject)
modified_text = modified_text.replace('SEEDMASKDIR', os.path.dirname(
vertex.rois))
modified_text = modified_text.replace('SEEDMASKRELDIR', os.path.relpath(
os.path.dirname(vertex.rois), os.path.dirname(scene_file)))
modified_text = modified_text.replace('SEEDMASKCIFTI', os.path.basename(
vertex.rois))
modified_text = modified_text.replace('SEEDCORRDIR', os.path.dirname(
vertex.seed_corr))
modified_text = modified_text.replace('SEEDCORRRELDIR', os.path.relpath(
os.path.dirname(vertex.seed_corr), os.path.dirname(scene_file)))
modified_text = modified_text.replace('SEEDCORRCIFTI', os.path.basename(
vertex.seed_corr))
return modified_text
# What's this one needed for? Other one better?
###################
def write_header(qc_page, subject, vert_type, network):
qc_page.write('<!DOCTYPE html>\n<HTML><TITLE> {} {}{}</TITLE>\n'.format(
subject, vert_type, network))
ciftify.html.write_header(qc_page)
qc_page.write('<body>\n')
ciftify.html.write_navbar(qc_page,
"{} Network {} {}".format(subject, network, vert_type),
[{ 'href': "qc_sub.html", 'label': "Return to Subject Page"}])
qc_page.write('<h1> {} network {} {} seed correlation </h1>\n'.format(
subject, network, vert_type))
def write_subject_page(qc_config, qc_page, scene_file, subject, qc_subdir,
vertex, network):
write_header(qc_page, subject, vertex.vert_type, network)
for image in qc_config.images:
pic_name = '{}{}_{}.png'.format(vertex.vert_type,
network, image.name)
ciftify.html.write_image(qc_page, 12, pic_name,
pic_name, "")
output_path = os.path.join(qc_subdir, pic_name)
image.make_image(output_path, scene_file)
def write_index_body(html_page, subjects, PINTnets):
## writing the lists to the main index page
html_page.write('<h1>PINT results index</h1>\n')
html_page.write('<h2>All subjects together</h2>\n')
html_page.write('<ul>\n ')
html_page.write('<li><a href="corrmats.html">Correlation Matrixes</a>'
'</li>\n')
for pint_dict in PINTnets:
html_page.write('<li><a href="network_{}.html">Network {} Seed'
' Correlations</a></li>\n'.format(pint_dict['NETWORK'],
pint_dict['NETWORK']))
html_page.write('</ul>\n')
html_page.write('<h2>Subject Pages</h2>\n')
html_page.write('<ul>\n ')
for subject in subjects:
html_page.write('<li><a href="{}/qc_sub.html">{}</a></li>\n'
''.format(subject, subject))
html_page.write('</ul>\n')
html_page.write('</body>')
def write_all_index_pages(settings, qc_config):
'''
Makes all the indices.
'''
# get the subjects list
subjects = ciftify.utilities.get_subj(settings.qc_dir)
if settings.subject_filter:
subjects = list(filter(lambda x: settings.subject_filter in x, subjects))
index_html = os.path.join(settings.qc_dir, 'index.html')
with open(index_html, 'w') as main_index:
write_header_and_navbar(main_index, 'PINT results', PINTnets,
active_link="index.html")
write_index_body(main_index, subjects, PINTnets)
# write the corrmat index
write_pic_index(settings.qc_dir, subjects, '_corrmat.png',
"theme-table-image col-sm-6", 'corrmats.html',
"Correlation Matrixes")
for pint_dict in PINTnets:
write_pic_index(settings.qc_dir, subjects,
'{}_{}.png'.format(pint_dict['NETWORK'], pint_dict['best_view']),
"theme-table-image col-sm-12", 'network_{}.html'.format(
pint_dict['NETWORK']), "Network {} Index".format(
pint_dict['NETWORK']))
return 0
### Erin's little function for running things in the shell
def docmd(cmdlist):
"sends a command (inputed as a list) to the shell"
global DRYRUN
global DEBUG
echo_cmd = True if DEBUG else False
supress_stdout = False
if "math" in cmdlist[0]: supress_stdout = True
ciftify.utilities.run(cmdlist, dryrun=DRYRUN, echo=echo_cmd,
supress_stdout=supress_stdout)
def write_pic_index(qc_dir, subjects, pic_ending, col_width, index_name, title):
'''
Writes html file with all subjects for one pic shown together
'''
html_index = os.path.join(qc_dir, index_name)
with open(html_index, 'w') as pic_page:
write_header_and_navbar(pic_page, 'PINT_results', PINTnets, title=title,
active_link=index_name)
pic_page.write('<h1>{}</h1>\n'.format(title))
for subject in subjects:
subject_page = os.path.join(qc_dir, subject, 'qc_sub.html')
pic_page.write('<div class="container" style="width: 100%;">')
for vert_type in SummaryData.vertex_types:
pic = os.path.join(qc_dir, subject, '{}{}'.format(vert_type,
pic_ending))
pic_rel_path = os.path.relpath(pic, os.path.dirname(
pic_page.name))
subject_rel_path = os.path.relpath(subject_page,
os.path.dirname(pic_page.name))
ciftify.html.write_image(pic_page, col_width, subject_rel_path,
pic_rel_path, "{} {}".format(subject, vert_type))
pic_page.write('</div>\n</br>')
pic_page.write('</body>\n')
if __name__=='__main__':
ret = main()
sys.exit(ret)
| mit |
arahuja/scikit-learn | sklearn/tests/test_dummy.py | 4 | 17840 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba(X[0]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
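# Hedged, sklearn-free sketch of the identity exercised by the "mean" case
# above: with sample weights, DummyRegressor's constant is just a weighted
# average, i.e. sum(w_i * y_i) / sum(w_i). Underscore-prefixed so it is not
# collected as a test; the numbers are illustrative only.
def _example_weighted_mean():
    y = np.array([1., 2., 4.])
    w = np.array([.5, .25, .25])
    assert_almost_equal(np.average(y, weights=w), np.dot(w, y) / w.sum())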
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
vamsirajendra/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py | 69 | 20839 | """
A backend for FLTK
Copyright: Gregory Lielens, Free Field Technologies SA and
John D. Hunter 2004
This code is released under the matplotlib license
"""
from __future__ import division
import os, sys, math
import fltk as Fltk
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib import rcParams, verbose
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import \
RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\
NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import thread,time
Fl_running=thread.allocate_lock()
def Fltk_run_interactive():
global Fl_running
if Fl_running.acquire(0):
while True:
Fltk.Fl.check()
time.sleep(0.005)
else:
print "fl loop already running"
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord= {
cursors.HAND: Fltk.FL_CURSOR_HAND,
cursors.POINTER: Fltk.FL_CURSOR_ARROW,
cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS,
cursors.MOVE: Fltk.FL_CURSOR_MOVE
}
special_key={
Fltk.FL_Shift_R:'shift',
Fltk.FL_Shift_L:'shift',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
65515:'win',
65516:'win',
}
def error_msg_fltk(msg, parent=None):
Fltk.fl_message(msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def ishow():
"""
Show all the figures and enter the fltk mainloop in another thread
    This allows you to keep control of the interactive Python session
    Warning: does not work under Windows
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
if show._needmain:
thread.start_new_thread(Fltk_run_interactive,())
show._needmain = False
def show():
"""
Show all the figures and enter the fltk mainloop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
    #mainloop; if an fltk program already exists there is no need to call this
    #threaded (and interactive) version
if show._needmain:
Fltk.Fl.run()
show._needmain = False
show._needmain = True
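# Hedged usage sketch (illustrative only, never executed on import): with this
# backend active a script typically builds its figures through pyplot and then
# calls show() as its last line; ishow() is the threaded, interactive variant.
def _example_show_usage():
    from matplotlib import pyplot  # assumed importable with this backend selected
    fig = pyplot.figure()
    fig.gca().plot([0, 1], [1, 0])
    show()   # blocking FLTK mainloop; use ishow() to keep the session alive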
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Fltk.Fl_Double_Window(10,10,30,30)
canvas = FigureCanvasFltkAgg(figure)
window.end()
window.show()
window.make_current()
figManager = FigureManagerFltkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FltkCanvas(Fltk.Fl_Widget):
def __init__(self,x,y,w,h,l,source):
Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas")
self._source=source
self._oldsize=(None,None)
self._draw_overlay = False
self._button = None
self._key = None
def draw(self):
newsize=(self.w(),self.h())
if(self._oldsize !=newsize):
self._oldsize =newsize
self._source.resize(newsize)
self._source.draw()
t1,t2,w,h = self._source.figure.bbox.bounds
Fltk.fl_draw_image(self._source.buffer_rgba(0,0),0,0,int(w),int(h),4,0)
self.redraw()
def blit(self,bbox=None):
if bbox is None:
t1,t2,w,h = self._source.figure.bbox.bounds
else:
t1o,t2o,wo,ho = self._source.figure.bbox.bounds
t1,t2,w,h = bbox.bounds
x,y=int(t1),int(t2)
Fltk.fl_draw_image(self._source.buffer_rgba(x,y),x,y,int(w),int(h),4,int(wo)*4)
#self.redraw()
def handle(self, event):
x=Fltk.Fl.event_x()
y=Fltk.Fl.event_y()
yf=self._source.figure.bbox.height() - y
if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS:
return 1
elif event == Fltk.FL_KEYDOWN:
ikey= Fltk.Fl.event_key()
if(ikey<=255):
self._key=chr(ikey)
else:
try:
self._key=special_key[ikey]
except:
self._key=None
FigureCanvasBase.key_press_event(self._source, self._key)
return 1
elif event == Fltk.FL_KEYUP:
FigureCanvasBase.key_release_event(self._source, self._key)
self._key=None
elif event == Fltk.FL_PUSH:
self.window().make_current()
if Fltk.Fl.event_button1():
self._button = 1
elif Fltk.Fl.event_button2():
self._button = 2
elif Fltk.Fl.event_button3():
self._button = 3
else:
self._button = None
if self._draw_overlay:
self._oldx=x
self._oldy=y
if Fltk.Fl.event_clicks():
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
else:
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
elif event == Fltk.FL_ENTER:
self.take_focus()
return 1
elif event == Fltk.FL_LEAVE:
return 1
elif event == Fltk.FL_MOVE:
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_DRAG:
self.window().make_current()
if self._draw_overlay:
self._dx=Fltk.Fl.event_x()-self._oldx
self._dy=Fltk.Fl.event_y()-self._oldy
Fltk.fl_overlay_rect(self._oldx,self._oldy,self._dx,self._dy)
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_RELEASE:
self.window().make_current()
if self._draw_overlay:
Fltk.fl_overlay_clear()
FigureCanvasBase.button_release_event(self._source, x, yf, self._button)
self._button = None
return 1
return 0
class FigureCanvasFltkAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self,figure)
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self.canvas=FltkCanvas(0, 0, w, h, "canvas",self)
#self.draw()
def resize(self,size):
w, h = size
# compute desired figure size in inches
dpival = self.figure.dpi.get()
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch,hinch)
def draw(self):
FigureCanvasAgg.draw(self)
self.canvas.redraw()
def blit(self,bbox):
self.canvas.blit(bbox)
show = draw
def widget(self):
return self.canvas
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def destroy_figure(ptr,figman):
figman.window.hide()
Gcf.destroy(figman._num)
class FigureManagerFltkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The fltk.Toolbar
window : The fltk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
#Fltk container window
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window = window
self.window.size(w,h+30)
self.window_title="Figure %d" % num
self.window.label(self.window_title)
self.window.size_range(350,200)
self.window.callback(destroy_figure,self)
self.canvas = canvas
self._num = num
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2FltkAgg( canvas, self )
else:
self.toolbar = None
self.window.add_resizable(canvas.widget())
if self.toolbar:
self.window.add(self.toolbar.widget())
self.toolbar.update()
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
_focus = windowing.FocusManager()
self.canvas.draw()
self.window.redraw()
def set_window_title(self, title):
self.window_title=title
self.window.label(title)
class AxisMenu:
def __init__(self, toolbar):
self.toolbar=toolbar
self._naxes = toolbar.naxes
self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes")
self._mbutton.add("Select All",0,select_all,self,0)
self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER)
self._axis_txt=[]
self._axis_var=[]
for i in range(self._naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes, naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
self._mbutton.remove(i+2)
if(naxes):
self._axis_var=self._axis_var[:naxes-1]
self._axis_txt=self._axis_txt[:naxes-1]
else:
self._axis_var=[]
self._axis_txt=[]
self._naxes = naxes
set_active(0,self)
def widget(self):
return self._mbutton
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()]
return a
def set_active(ptr,amenu):
amenu.toolbar.set_active(amenu.get_indices())
def invert_all(ptr,amenu):
for a in amenu._axis_var:
if not a.value(): a.set()
set_active(ptr,amenu)
def select_all(ptr,amenu):
for a in amenu._axis_var:
a.set()
set_active(ptr,amenu)
class FLTKButton:
def __init__(self, text, file, command,argument,type="classic"):
file = os.path.join(rcParams['datapath'], 'images', file)
self.im = Fltk.Fl_PNM_Image(file)
size=26
if type=="repeat":
self.b = Fltk.Fl_Repeat_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="classic":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="light":
self.b = Fltk.Fl_Light_Button(0,0,size+20,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="pushed":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_UP_BOX)
self.b.down_box(Fltk.FL_DOWN_BOX)
self.b.type(Fltk.FL_TOGGLE_BUTTON)
self.tooltiptext=text+" "
self.b.tooltip(self.tooltiptext)
self.b.callback(command,argument)
self.b.image(self.im)
self.b.deimage(self.im)
self.type=type
def widget(self):
return self.b
class NavigationToolbar:
"""
    Public attributes
canvas - the FigureCanvas (FigureCanvasFltkAgg = customised fltk.Widget)
"""
def __init__(self, canvas, figman):
#xmin, xmax = canvas.figure.bbox.intervalx().get_bounds()
#height, width = 50, xmax-xmin
self.canvas = canvas
self.figman = figman
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bLeft = FLTKButton(
text="Left", file="stock_left.ppm",
command=pan,argument=(self,1,'x'),type="repeat")
self.bRight = FLTKButton(
text="Right", file="stock_right.ppm",
command=pan,argument=(self,-1,'x'),type="repeat")
self.bZoomInX = FLTKButton(
text="ZoomInX",file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'x'),type="repeat")
self.bZoomOutX = FLTKButton(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'x'),type="repeat")
self.bUp = FLTKButton(
text="Up", file="stock_up.ppm",
command=pan,argument=(self,1,'y'),type="repeat")
self.bDown = FLTKButton(
text="Down", file="stock_down.ppm",
command=pan,argument=(self,-1,'y'),type="repeat")
self.bZoomInY = FLTKButton(
text="ZoomInY", file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'y'),type="repeat")
self.bZoomOutY = FLTKButton(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'y'),type="repeat")
self.bSave = FLTKButton(
text="Save", file="stock_save_as.ppm",
command=save_figure, argument=self)
self._group.end()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
def pan(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.panx(direction)
else:
a.pany(direction)
base.figman.show()
def zoom(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.zoomx(direction)
else:
a.zoomy(direction)
base.figman.show()
def save_figure(ptr,base):
filetypes = base.canvas.get_supported_filetypes()
default_filetype = base.canvas.get_default_filetype()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
selected_filter = 0
filters = []
for i, (ext, name) in enumerate(sorted_filetypes):
filter = '%s (*.%s)' % (name, ext)
filters.append(filter)
if ext == default_filetype:
selected_filter = i
filters = '\t'.join(filters)
file_chooser=base._fc
file_chooser.filter(filters)
file_chooser.filter_value(selected_filter)
file_chooser.show()
while file_chooser.visible() :
Fltk.Fl.wait()
fname=None
if(file_chooser.count() and file_chooser.value(0) != None):
fname=""
(status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0))
if fname is None: # Cancel
return
#start from last directory
lastDir = os.path.dirname(fname)
file_chooser.directory(lastDir)
format = sorted_filetypes[file_chooser.filter_value()][0]
try:
base.canvas.print_figure(fname, format=format)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_fltk(msg)
class NavigationToolbar2FltkAgg(NavigationToolbar2):
"""
    Public attributes
canvas - the FigureCanvas
figman - the Figure manager
"""
def __init__(self, canvas, figman):
self.canvas = canvas
self.figman = figman
NavigationToolbar2.__init__(self, canvas)
self.pan_selected=False
self.zoom_selected=False
def set_cursor(self, cursor):
Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE)
def dynamic_update(self):
self.canvas.draw()
def pan(self,*args):
self.pan_selected=not self.pan_selected
self.zoom_selected = False
self.canvas.canvas._draw_overlay= False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.pan(self,args)
def zoom(self,*args):
self.zoom_selected=not self.zoom_selected
self.canvas.canvas._draw_overlay=self.zoom_selected
self.pan_selected = False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.zoom(self,args)
def configure_subplots(self,*args):
window = Fltk.Fl_Double_Window(100,100,480,240)
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasFltkAgg(toolfig)
window.end()
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
window.show()
canvas.show()
def _init_toolbar(self):
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = self.canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bHome = FLTKButton(
text="Home", file="home.ppm",
command=self.home,argument=self)
self.bBack = FLTKButton(
text="Back", file="back.ppm",
command=self.back,argument=self)
self.bForward = FLTKButton(
text="Forward", file="forward.ppm",
command=self.forward,argument=self)
self.bPan = FLTKButton(
text="Pan/Zoom",file="move.ppm",
command=self.pan,argument=self,type="pushed")
self.bZoom = FLTKButton(
text="Zoom to rectangle",file="zoom_to_rect.ppm",
command=self.zoom,argument=self,type="pushed")
self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots,argument=self,type="pushed")
self.bSave = FLTKButton(
text="Save", file="filesave.ppm",
command=save_figure, argument=self)
self._group.end()
self.message = Fltk.Fl_Output(0,0,w,8)
self._group.add_resizable(self.message)
self.update()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def set_message(self, s):
self.message.value(s)
FigureManager = FigureManagerFltkAgg
| agpl-3.0 |
toobaz/pandas | pandas/tests/dtypes/cast/test_infer_dtype.py | 2 | 5117 | from datetime import date, datetime, timedelta
import numpy as np
import pytest
from pandas.core.dtypes.cast import (
cast_scalar_to_array,
infer_dtype_from_array,
infer_dtype_from_scalar,
)
from pandas.core.dtypes.common import is_dtype_equal
from pandas import Categorical, Period, Series, Timedelta, Timestamp, date_range
from pandas.util import testing as tm
@pytest.fixture(params=[True, False])
def pandas_dtype(request):
return request.param
def test_infer_dtype_from_int_scalar(any_int_dtype):
    # Test that infer_dtype_from_scalar returns the correct dtype
    # for integer scalars.
data = np.dtype(any_int_dtype).type(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == type(data)
def test_infer_dtype_from_float_scalar(float_dtype):
float_dtype = np.dtype(float_dtype).type
data = float_dtype(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == float_dtype
@pytest.mark.parametrize("data,exp_dtype", [(12, np.int64), (np.float(12), np.float64)])
def test_infer_dtype_from_python_scalar(data, exp_dtype):
dtype, val = infer_dtype_from_scalar(data)
assert dtype == exp_dtype
@pytest.mark.parametrize("bool_val", [True, False])
def test_infer_dtype_from_boolean(bool_val):
dtype, val = infer_dtype_from_scalar(bool_val)
assert dtype == np.bool_
def test_infer_dtype_from_complex(complex_dtype):
data = np.dtype(complex_dtype).type(1)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.complex_
@pytest.mark.parametrize(
"data", [np.datetime64(1, "ns"), Timestamp(1), datetime(2000, 1, 1, 0, 0)]
)
def test_infer_dtype_from_datetime(data):
dtype, val = infer_dtype_from_scalar(data)
assert dtype == "M8[ns]"
@pytest.mark.parametrize("data", [np.timedelta64(1, "ns"), Timedelta(1), timedelta(1)])
def test_infer_dtype_from_timedelta(data):
dtype, val = infer_dtype_from_scalar(data)
assert dtype == "m8[ns]"
@pytest.mark.parametrize("freq", ["M", "D"])
def test_infer_dtype_from_period(freq, pandas_dtype):
p = Period("2011-01-01", freq=freq)
dtype, val = infer_dtype_from_scalar(p, pandas_dtype=pandas_dtype)
if pandas_dtype:
exp_dtype = "period[{0}]".format(freq)
exp_val = p.ordinal
else:
exp_dtype = np.object_
exp_val = p
assert dtype == exp_dtype
assert val == exp_val
@pytest.mark.parametrize(
"data", [date(2000, 1, 1), "foo", Timestamp(1, tz="US/Eastern")]
)
def test_infer_dtype_misc(data):
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.object_
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo"])
def test_infer_from_scalar_tz(tz, pandas_dtype):
dt = Timestamp(1, tz=tz)
dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=pandas_dtype)
if pandas_dtype:
exp_dtype = "datetime64[ns, {0}]".format(tz)
exp_val = dt.value
else:
exp_dtype = np.object_
exp_val = dt
assert dtype == exp_dtype
assert val == exp_val
def test_infer_dtype_from_scalar_errors():
msg = "invalid ndarray passed to infer_dtype_from_scalar"
with pytest.raises(ValueError, match=msg):
infer_dtype_from_scalar(np.array([1]))
@pytest.mark.parametrize(
"arr, expected, pandas_dtype",
[
("foo", np.object_, False),
(b"foo", np.object_, False),
(1, np.int_, False),
(1.5, np.float_, False),
([1], np.int_, False),
(np.array([1], dtype=np.int64), np.int64, False),
([np.nan, 1, ""], np.object_, False),
(np.array([[1.0, 2.0]]), np.float_, False),
(Categorical(list("aabc")), np.object_, False),
(Categorical([1, 2, 3]), np.int64, False),
(Categorical(list("aabc")), "category", True),
(Categorical([1, 2, 3]), "category", True),
(Timestamp("20160101"), np.object_, False),
(np.datetime64("2016-01-01"), np.dtype("=M8[D]"), False),
(date_range("20160101", periods=3), np.dtype("=M8[ns]"), False),
(
date_range("20160101", periods=3, tz="US/Eastern"),
"datetime64[ns, US/Eastern]",
True,
),
(Series([1.0, 2, 3]), np.float64, False),
(Series(list("abc")), np.object_, False),
(
Series(date_range("20160101", periods=3, tz="US/Eastern")),
"datetime64[ns, US/Eastern]",
True,
),
],
)
def test_infer_dtype_from_array(arr, expected, pandas_dtype):
dtype, _ = infer_dtype_from_array(arr, pandas_dtype=pandas_dtype)
assert is_dtype_equal(dtype, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(1, np.int64),
(1.1, np.float64),
(Timestamp("2011-01-01"), "datetime64[ns]"),
(Timestamp("2011-01-01", tz="US/Eastern"), np.object),
(Period("2011-01-01", freq="D"), np.object),
],
)
def test_cast_scalar_to_array(obj, dtype):
shape = (3, 2)
exp = np.empty(shape, dtype=dtype)
exp.fill(obj)
arr = cast_scalar_to_array(shape, obj, dtype=dtype)
tm.assert_numpy_array_equal(arr, exp)
| bsd-3-clause |
zihua/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # that many iterations are needed for convergence
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
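# Hedged sketch, not collected as a test: gibbs(v) performs one full
# alternating step (visible -> hidden -> visible) with the fitted weights,
# which is what the assertion above relies on. Values are illustrative.
def _example_gibbs_roundtrip():
    rng = np.random.RandomState(42)
    X = np.array([[0.], [1.]])
    rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=42,
                       random_state=rng)
    rbm.fit(X)
    return rbm.gibbs(X)  # after enough iterations this reproduces X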
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
microelly2/reconstruction | reconstruction/CV_cornerharris.py | 1 | 4491 | # -*- coding: utf-8 -*-
#-------------------------------------------------
#-- reconstruction workbench
#--
#-- microelly 2016 v 0.1
#--
#-- GNU Lesser General Public License (LGPL)
#-------------------------------------------------
__vers__="13.03.2016 0.1"
__dir__='/home/thomas/.FreeCAD/Mod/reconstruction'
import sympy
from sympy import Point3D,Plane
import PySide
from PySide import QtCore, QtGui
import FreeCAD,FreeCADGui
import cv2
import numpy as np
import reconstruction
reload (reconstruction.projectiontools)
from reconstruction.projectiontools import *
from reconstruction.CV import _CV, _ViewProviderCV, createCV
reload(reconstruction.CV )
import reconstruction.miki as miki
reload(miki)
class _CV_cornerharris(_CV):
def __init__(self,obj,icon='/icons/animation.png'):
_CV.__init__(self,obj,icon)
_ViewProviderCV_cornerharris(obj.ViewObject,icon)
def execute(self,obj):
obj.ViewObject.Proxy.animpingpong()
return
class _ViewProviderCV_cornerharris(_ViewProviderCV):
def __init__(self,vobj,icon):
_ViewProviderCV.__init__(self,vobj,icon)
def showVersion(self):
cl=self.Object.Proxy.__class__.__name__
PySide.QtGui.QMessageBox.information(None, "About ", cl +"_\nVersion " + __vers__)
def edit(self):
_ViewProviderCV.edit(self)
self.animpingpong()
def animpingpong(self):
obj=self.Object
img=None
if not obj.imageFromNode:
img = cv2.imread(obj.imageFile)
else:
img = obj.imageNode.ViewObject.Proxy.img.copy()
print (obj.blockSize,obj.ksize,obj.k)
try:
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
print "normale"
except:
im2=cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
gray = cv2.cvtColor(im2,cv2.COLOR_RGB2GRAY)
print "except"
dst = cv2.cornerHarris(gray,obj.blockSize,obj.ksize*2+1,obj.k/10000)
dst = cv2.dilate(dst,None)
img[dst>0.01*dst.max()]=[0,0,255]
dst2=img.copy()
dst2[dst<0.01*dst.max()]=[255,255,255]
dst2[dst>0.01*dst.max()]=[0,0,255]
if not obj.matplotlib:
cv2.imshow(obj.Label,img)
else:
from matplotlib import pyplot as plt
plt.subplot(121),plt.imshow(img,cmap = 'gray')
plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(dst2,cmap = 'gray')
plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
plt.show()
self.img=img
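# Hedged standalone sketch of the OpenCV call wired into the dialog above.
# The image path is hypothetical; the parameters map onto the three sliders:
# blockSize (neighbourhood size), ksize (odd Sobel aperture), k (Harris constant).
def exampleCornerHarris(path='/tmp/example.png'):
    img = cv2.imread(path)
    gray = np.float32(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
    dst = cv2.dilate(cv2.cornerHarris(gray, 2, 3, 0.04), None)
    img[dst > 0.01 * dst.max()] = [0, 0, 255]   # mark corners in red (BGR)
    return img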
s6='''
VerticalLayout:
id:'main'
QtGui.QLabel:
setText:"*** O P E N C V ***"
QtGui.QLabel:
QtGui.QLabel:
setText:"*** Harris Corner Detection ***"
QtGui.QLabel:
QtGui.QLabel:
id:'blockSizeLabel'
setText:"BlockSize"
QtGui.QSlider:
id:'blockSize'
setOrientation: PySide.QtCore.Qt.Orientation.Horizontal
setMinimum: 0
setMaximum: 10
# setTickInterval: 10
setValue: 2
setTickPosition: QtGui.QSlider.TicksBelow
valueChanged.connect: app.change
QtGui.QLabel:
id:'ksizeLabel'
setText:"ksize"
QtGui.QSlider:
id:'ksize'
setOrientation: PySide.QtCore.Qt.Orientation.Horizontal
setMinimum: 0
setMaximum: 15
setValue: 1
# site *2 +1 --> 3
setTickPosition: QtGui.QSlider.TicksBelow
valueChanged.connect: app.change
QtGui.QLabel:
id:'kLabel'
setText:"k"
QtGui.QSlider:
id:'k'
setOrientation: PySide.QtCore.Qt.Orientation.Horizontal
setMinimum: 0
setMaximum: 10000
setValue: 1
# val/10000
valueChanged.connect: app.change
QtGui.QPushButton:
id:'moveBtn'
setText: "update view"
clicked.connect: app.create
setEnabled: False
'''
class MyApp(object):
def create(self):
self.obj.Proxy.execute(self.obj)
def change(self):
print "changed"
self.obj.k=self.root.ids['k'].value()
self.obj.ksize=self.root.ids['ksize'].value()
self.obj.blockSize=self.root.ids['blockSize'].value()
self.root.ids['kLabel'].setText("k " + str(round((0.0+self.obj.k)/10000,5)))
self.root.ids['ksizeLabel'].setText("ksize " + str(self.obj.ksize*2+1))
self.root.ids['blockSizeLabel'].setText("blockSize " + str(self.obj.blockSize))
self.obj.Proxy.execute(self.obj)
def createCV_cornerharris():
print "create CV cornerharris ... 2"
obj= createCV(True)
obj.Label='Harris'
obj.addProperty('App::PropertyInteger','blockSize',"cornerHarris").blockSize=2
obj.addProperty('App::PropertyInteger','ksize',"cornerHarris").ksize=3
obj.addProperty('App::PropertyFloat','k',"cornerHarris").k=1.0
_CV_cornerharris(obj,__dir__+ '/icons/icon2.svg')
miki2=miki.Miki2(MyApp,s6,obj)
return obj
def run():
return createCV_cornerharris()
| lgpl-3.0 |
dsquareindia/scikit-learn | sklearn/linear_model/logistic.py | 13 | 67587 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.extmath import row_norms
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import NotFittedError
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
Returns
-------
w : ndarray, shape (n_features,)
Coefficient vector without the intercept weight (w[-1]) if the
intercept should be fit. Unchanged otherwise.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Unchanged.
yz : float
y * np.dot(X, w).
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
yz = y * z
return w, c, yz
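# Illustrative sketch (not part of the public API): the helper above expects
# the intercept, when fitted, to be stored as the *last* entry of w. The toy
# numbers below are hypothetical and only demonstrate the expected shapes.
def _example_intercept_dot():  # pragma: no cover
    X = np.array([[1., 2.], [3., 4.]])
    y = np.array([1., -1.])
    w = np.array([.5, -.25, .1])          # two coefficients + intercept
    w_out, c, yz = _intercept_dot(w, X, y)
    # w_out.shape == (2,), c == .1 and yz == y * (X.dot(w_out) + c)
    return w_out, c, yz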
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(n_samples)
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
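# Hedged usage sketch for the helper above: with alpha = 1. / C, the returned
# gradient has one extra entry (the intercept derivative) whenever w carries
# an intercept term. The values are illustrative only.
def _example_logistic_loss_and_grad():  # pragma: no cover
    X = np.array([[1., 2.], [3., 4.], [5., 6.]])
    y = np.array([1., -1., 1.])
    w = np.zeros(X.shape[1] + 1)          # coefficients + intercept
    loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
    # at w == 0 the loss equals 3 * log(2) and grad.shape == (3,)
    return loss, grad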
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
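# Hedged sketch for the multinomial helpers above: Y is the one-hot
# (LabelBinarizer-style) encoding of y and the gradient comes back as the
# ravelled (n_classes, n_features [+ 1]) matrix. Toy values only.
def _example_multinomial_loss_grad():  # pragma: no cover
    X = np.array([[1., 0.], [0., 1.], [1., 1.]])
    Y = np.array([[1., 0.], [0., 1.], [1., 0.]])      # one-hot targets
    w = np.zeros(Y.shape[1] * X.shape[1])             # no intercept here
    sample_weight = np.ones(X.shape[0])
    loss, grad, p = _multinomial_loss_grad(w, X, Y, 1., sample_weight)
    # at w == 0 each class gets probability 1 / n_classes, so
    # loss == 3 * log(2) and grad.shape == (4,)
    return loss, grad, p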
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Note that there will be no speedup with liblinear solver, since it does
not handle warm-starting.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
.. versionchanged:: 0.19
The "copy" parameter was removed.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
if check_input:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
# for compute_class_weight
if class_weight == "balanced":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver != 'sag':
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F')
if coef is not None:
        # it must work both with and without the bias term
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
            try:
                n_iter_i = info['nit'] - 1
            except KeyError:
                # older scipy versions do not report 'nit'
                n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, None,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
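# Illustrative sketch of calling logistic_regression_path directly (the toy
# arrays below are illustrative only, not part of this module); it fits one
# binary problem over a small grid of C values and returns one coefficient
# vector per C:
#     X = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
#     y = np.array([0, 0, 1, 1])
#     coefs, Cs, n_iter = logistic_regression_path(
#         X, y, pos_class=1, Cs=[0.1, 1., 10.], solver='lbfgs')
#     # len(coefs) == 3; each entry has shape (n_features + 1,) since
#     # fit_intercept defaults to True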
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
        Actual number of iterations for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
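# Illustrative sketch of how the helper above is driven (the indices are made
# up): LogisticRegressionCV calls it once per (class, CV fold) pair, e.g.
#     train, test = np.arange(0, 80), np.arange(80, 100)
#     coefs, Cs, scores, n_iter = _log_reg_scoring_path(
#         X, y, train, test, Cs=10, scoring=None, solver='lbfgs')
# so scores[k] is the held-out score (accuracy by default) obtained with the
# k-th value in Cs.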
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr', and uses the cross-
entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag' and 'lbfgs' solvers. It can handle
both dense and sparse input. Use C-ordered arrays or CSR matrices
containing 64-bit floats for optimal performance; any other input format
will be converted (and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation. The 'liblinear' solver supports both L1 and L2
regularization, with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2', default: 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool, default: False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, default: 1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default: None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'*
max_iter : int, default: 100
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, default: None
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}, default: 'liblinear'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, default: 1e-4
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}, default: 'ovr'
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
        'sag' and 'lbfgs' solvers.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
verbose : int, default: 0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default: False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag* solvers.
n_jobs : int, default: 1
        Number of CPU cores used when parallelizing over classes
        if multi_class='ovr'.
If given a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape(1,) when the problem is binary.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
        number of iterations across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach
Minimizing Finite Sums with the Stochastic Average Gradient
https://hal.inria.fr/hal-00860051/document
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
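    Examples
    --------
    A minimal usage sketch (the toy data below is illustrative only)::
        >>> from sklearn.linear_model import LogisticRegression
        >>> X = [[0., 0.], [1., 1.], [2., 2.], [3., 3.]]
        >>> y = [0, 0, 1, 1]
        >>> clf = LogisticRegression(C=1.0, solver='liblinear').fit(X, y)
        >>> clf.predict_proba(X).shape
        (4, 2)
        >>> clf.coef_.shape
        (1, 2)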
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function,
        and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
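        Notes
        -----
        For a fitted binary classifier ``clf`` the returned array has shape
        (n_samples, 2) and each row sums to one, e.g. (illustrative only)::
            proba = clf.predict_proba(X)
            assert proba.shape == (len(X), 2)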
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path i.e. guess the initial coefficients of the
present fit to be the coefficients got after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores got by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
scoring : string, callable, or None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``. For a list of scoring functions
that can be used, look at :mod:`sklearn.metrics`. The
default scoring option used is 'accuracy'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
        'sag' and 'lbfgs' solvers.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
intercept_scaling : float, default 1.
Useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes ``intercept_scaling * synthetic_feature_weight``.
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
`intercept_` is of shape(1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
        given is 'multinomial' then the same scores are repeated across
        all classes, since the multinomial loss is computed jointly over
        all classes.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
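    Examples
    --------
    A minimal usage sketch (toy, perfectly separable data; illustrative
    only)::
        >>> from sklearn.linear_model import LogisticRegressionCV
        >>> X = [[0., 0.], [1., 1.], [2., 2.], [3., 3.], [4., 4.], [5., 5.]]
        >>> y = [0, 0, 0, 1, 1, 1]
        >>> clf = LogisticRegressionCV(Cs=5, cv=3).fit(X, y)
        >>> len(clf.Cs_)
        5
        >>> clf.C_.shape
        (1,)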
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
class_weight = self.class_weight
# Encode for string labels
label_encoder = LabelEncoder().fit(y)
y = label_encoder.transform(y)
if isinstance(class_weight, dict):
class_weight = dict((label_encoder.transform([cls])[0], v)
for cls, v in class_weight.items())
# The original class labels
classes = self.classes_ = label_encoder.classes_
encoded_labels = label_encoder.transform(label_encoder.classes_)
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
# Use the label encoded classes
n_classes = len(encoded_labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
encoded_labels = encoded_labels[1:]
classes = classes[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
if self.multi_class == 'multinomial':
iter_encoded_labels = iter_classes = [None]
else:
iter_encoded_labels = encoded_labels
iter_classes = classes
# compute the class weights for the entire dataset y
if class_weight == "balanced":
class_weight = compute_class_weight(class_weight,
np.arange(len(self.classes_)),
y)
class_weight = dict(enumerate(class_weight))
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_encoded_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(classes, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(classes, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, (cls, encoded_label) in enumerate(
zip(iter_classes, iter_encoded_labels)):
if self.multi_class == 'ovr':
# The scores_ / coefs_paths_ dict have unencoded class
# labels as their keys
scores = self.scores_[cls]
coefs_paths = self.coefs_paths_[cls]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
# Note that y is label encoded and hence pos_class must be
# the encoded label / None (for 'multinomial')
w, _, _ = logistic_regression_path(
X, y, pos_class=encoded_label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty,
class_weight=class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
| bsd-3-clause |
amolkahat/pandas | pandas/io/formats/excel.py | 3 | 24632 | """Utilities for conversion to writer-agnostic Excel representation
"""
import re
import warnings
import itertools
import numpy as np
from pandas.compat import reduce
import pandas.core.common as com
from pandas.core.dtypes.common import is_float, is_scalar
from pandas.core.dtypes import missing
from pandas.core.dtypes.generic import ABCMultiIndex, ABCPeriodIndex
from pandas import Index
from pandas.io.formats.css import CSSResolver, CSSWarning
from pandas.io.formats.printing import pprint_thing
from pandas.io.formats.format import get_level_lengths
class ExcelCell(object):
__fields__ = ('row', 'col', 'val', 'style', 'mergestart', 'mergeend')
__slots__ = __fields__
def __init__(self, row, col, val, style=None, mergestart=None,
mergeend=None):
self.row = row
self.col = col
self.val = val
self.style = style
self.mergestart = mergestart
self.mergeend = mergeend
class CSSToExcelConverter(object):
"""A callable for converting CSS declarations to ExcelWriter styles
Supports parts of CSS 2.2, with minimal CSS 3.0 support (e.g. text-shadow),
focusing on font styling, backgrounds, borders and alignment.
Operates by first computing CSS styles in a fairly generic
way (see :meth:`compute_css`) then determining Excel style
properties from CSS properties (see :meth:`build_xlstyle`).
Parameters
----------
inherited : str, optional
CSS declarations understood to be the containing scope for the
CSS processed by :meth:`__call__`.
"""
# NB: Most of the methods here could be classmethods, as only __init__
# and __call__ make use of instance attributes. We leave them as
# instancemethods so that users can easily experiment with extensions
# without monkey-patching.
def __init__(self, inherited=None):
if inherited is not None:
inherited = self.compute_css(inherited,
self.compute_css.INITIAL_STYLE)
self.inherited = inherited
compute_css = CSSResolver()
def __call__(self, declarations_str):
"""Convert CSS declarations to ExcelWriter style
Parameters
----------
declarations_str : str
List of CSS declarations.
e.g. "font-weight: bold; background: blue"
Returns
-------
xlstyle : dict
A style as interpreted by ExcelWriter when found in
ExcelCell.style.
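        Examples
        --------
        A rough sketch of the expected behaviour (illustrative only; the
        exact keys returned depend on the declarations passed in)::
            >>> converter = CSSToExcelConverter()
            >>> style = converter("font-weight: bold; color: #FF0000")
            >>> style['font']['bold']
            True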
"""
# TODO: memoize?
properties = self.compute_css(declarations_str, self.inherited)
return self.build_xlstyle(properties)
def build_xlstyle(self, props):
out = {
'alignment': self.build_alignment(props),
'border': self.build_border(props),
'fill': self.build_fill(props),
'font': self.build_font(props),
'number_format': self.build_number_format(props),
}
# TODO: handle cell width and height: needs support in pandas.io.excel
def remove_none(d):
"""Remove key where value is None, through nested dicts"""
for k, v in list(d.items()):
if v is None:
del d[k]
elif isinstance(v, dict):
remove_none(v)
if not v:
del d[k]
remove_none(out)
return out
VERTICAL_MAP = {
'top': 'top',
'text-top': 'top',
'middle': 'center',
'baseline': 'bottom',
'bottom': 'bottom',
'text-bottom': 'bottom',
# OpenXML also has 'justify', 'distributed'
}
def build_alignment(self, props):
# TODO: text-indent, padding-left -> alignment.indent
return {'horizontal': props.get('text-align'),
'vertical': self.VERTICAL_MAP.get(props.get('vertical-align')),
'wrap_text': (None if props.get('white-space') is None else
props['white-space'] not in
('nowrap', 'pre', 'pre-line'))
}
def build_border(self, props):
return {side: {
'style': self._border_style(props.get('border-{side}-style'
.format(side=side)),
props.get('border-{side}-width'
.format(side=side))),
'color': self.color_to_excel(
props.get('border-{side}-color'.format(side=side))),
} for side in ['top', 'right', 'bottom', 'left']}
def _border_style(self, style, width):
# convert styles and widths to openxml, one of:
# 'dashDot'
# 'dashDotDot'
# 'dashed'
# 'dotted'
# 'double'
# 'hair'
# 'medium'
# 'mediumDashDot'
# 'mediumDashDotDot'
# 'mediumDashed'
# 'slantDashDot'
# 'thick'
# 'thin'
if width is None and style is None:
return None
if style == 'none' or style == 'hidden':
return None
if width is None:
width = '2pt'
width = float(width[:-2])
if width < 1e-5:
return None
elif width < 1.3:
width_name = 'thin'
elif width < 2.8:
width_name = 'medium'
else:
width_name = 'thick'
if style in (None, 'groove', 'ridge', 'inset', 'outset'):
# not handled
style = 'solid'
if style == 'double':
return 'double'
if style == 'solid':
return width_name
if style == 'dotted':
if width_name in ('hair', 'thin'):
return 'dotted'
return 'mediumDashDotDot'
if style == 'dashed':
if width_name in ('hair', 'thin'):
return 'dashed'
return 'mediumDashed'
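    # Illustrative mappings implied by _border_style above (CSS widths have
    # been converted to points by the time they reach this method):
    #   style='solid',  width='1pt' -> 'thin'
    #   style='solid',  width='2pt' -> 'medium'
    #   style='solid',  width='3pt' -> 'thick'
    #   style='dotted', width='1pt' -> 'dotted'
    #   style='dashed', width='3pt' -> 'mediumDashed'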
def build_fill(self, props):
# TODO: perhaps allow for special properties
# -excel-pattern-bgcolor and -excel-pattern-type
fill_color = props.get('background-color')
if fill_color not in (None, 'transparent', 'none'):
return {
'fgColor': self.color_to_excel(fill_color),
'patternType': 'solid',
}
BOLD_MAP = {'bold': True, 'bolder': True, '600': True, '700': True,
'800': True, '900': True,
'normal': False, 'lighter': False, '100': False, '200': False,
'300': False, '400': False, '500': False}
ITALIC_MAP = {'normal': False, 'italic': True, 'oblique': True}
def build_font(self, props):
size = props.get('font-size')
if size is not None:
assert size.endswith('pt')
size = float(size[:-2])
font_names_tmp = re.findall(r'''(?x)
(
"(?:[^"]|\\")+"
|
'(?:[^']|\\')+'
|
[^'",]+
)(?=,|\s*$)
''', props.get('font-family', ''))
font_names = []
for name in font_names_tmp:
if name[:1] == '"':
name = name[1:-1].replace('\\"', '"')
elif name[:1] == '\'':
name = name[1:-1].replace('\\\'', '\'')
else:
name = name.strip()
if name:
font_names.append(name)
family = None
for name in font_names:
if name == 'serif':
family = 1 # roman
break
elif name == 'sans-serif':
family = 2 # swiss
break
elif name == 'cursive':
family = 4 # script
break
elif name == 'fantasy':
family = 5 # decorative
break
decoration = props.get('text-decoration')
if decoration is not None:
decoration = decoration.split()
else:
decoration = ()
return {
'name': font_names[0] if font_names else None,
'family': family,
'size': size,
'bold': self.BOLD_MAP.get(props.get('font-weight')),
'italic': self.ITALIC_MAP.get(props.get('font-style')),
'underline': ('single' if
'underline' in decoration
else None),
'strike': ('line-through' in decoration) or None,
'color': self.color_to_excel(props.get('color')),
# shadow if nonzero digit before shadow color
'shadow': (bool(re.search('^[^#(]*[1-9]',
props['text-shadow']))
if 'text-shadow' in props else None),
# 'vertAlign':,
# 'charset': ,
# 'scheme': ,
# 'outline': ,
# 'condense': ,
}
NAMED_COLORS = {
'maroon': '800000',
'brown': 'A52A2A',
'red': 'FF0000',
'pink': 'FFC0CB',
'orange': 'FFA500',
'yellow': 'FFFF00',
'olive': '808000',
'green': '008000',
'purple': '800080',
'fuchsia': 'FF00FF',
'lime': '00FF00',
'teal': '008080',
'aqua': '00FFFF',
'blue': '0000FF',
'navy': '000080',
'black': '000000',
'gray': '808080',
'grey': '808080',
'silver': 'C0C0C0',
'white': 'FFFFFF',
}
def color_to_excel(self, val):
if val is None:
return None
if val.startswith('#') and len(val) == 7:
return val[1:].upper()
if val.startswith('#') and len(val) == 4:
return (val[1] * 2 + val[2] * 2 + val[3] * 2).upper()
try:
return self.NAMED_COLORS[val]
except KeyError:
warnings.warn('Unhandled color format: {val!r}'.format(val=val),
CSSWarning)
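    # Worked examples for color_to_excel, derived from the code above:
    #   '#FF0000' -> 'FF0000'   (6-digit hex, '#' stripped)
    #   '#f0a'    -> 'FF00AA'   (3-digit shorthand expanded per channel)
    #   'navy'    -> '000080'   (named colour lookup)
    #   'salmon'  -> a CSSWarning is issued and None is returned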
def build_number_format(self, props):
return {'format_code': props.get('number-format')}
class ExcelFormatter(object):
"""
    Class for formatting a DataFrame to a list of ExcelCells.
Parameters
----------
df : DataFrame or Styler
    na_rep : string, default ''
        Representation to use for missing (NA) values
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
output row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
merge_cells : boolean, default False
Format MultiIndex and Hierarchical Rows as merged cells.
inf_rep : string, default `'inf'`
representation for np.inf values (which aren't representable in Excel)
A `'-'` sign will be added in front of -inf.
style_converter : callable, optional
This translates Styler styles (CSS) into ExcelWriter styles.
Defaults to ``CSSToExcelConverter()``.
It should have signature css_declarations string -> excel style.
This is only called for body cells.
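    Examples
    --------
    A minimal, illustrative round trip (``df`` is any DataFrame; the output
    path is made up)::
        formatter = ExcelFormatter(df, na_rep='-', index=True)
        formatter.write('report.xlsx', sheet_name='Data')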
"""
def __init__(self, df, na_rep='', float_format=None, cols=None,
header=True, index=True, index_label=None, merge_cells=False,
inf_rep='inf', style_converter=None):
self.rowcounter = 0
self.na_rep = na_rep
if hasattr(df, 'render'):
self.styler = df
df = df.data
if style_converter is None:
style_converter = CSSToExcelConverter()
self.style_converter = style_converter
else:
self.styler = None
self.df = df
if cols is not None:
# all missing, raise
if not len(Index(cols) & df.columns):
                raise KeyError(
                    "passed columns are not ALL present in dataframe")
            # deprecated in gh-17295
# 1 missing is ok (for now)
if len(Index(cols) & df.columns) != len(cols):
warnings.warn(
"Not all names specified in 'columns' are found; "
"this will raise a KeyError in the future",
FutureWarning)
self.df = df.reindex(columns=cols)
self.columns = self.df.columns
self.float_format = float_format
self.index = index
self.index_label = index_label
self.header = header
self.merge_cells = merge_cells
self.inf_rep = inf_rep
@property
def header_style(self):
return {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center",
"vertical": "top"}}
def _format_value(self, val):
if is_scalar(val) and missing.isna(val):
val = self.na_rep
elif is_float(val):
if missing.isposinf_scalar(val):
val = self.inf_rep
elif missing.isneginf_scalar(val):
val = '-{inf}'.format(inf=self.inf_rep)
elif self.float_format is not None:
val = float(self.float_format % val)
return val
def _format_header_mi(self):
if self.columns.nlevels > 1:
if not self.index:
raise NotImplementedError("Writing to Excel with MultiIndex"
" columns and no index "
"('index'=False) is not yet "
"implemented.")
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if not (has_aliases or self.header):
return
columns = self.columns
level_strs = columns.format(sparsify=self.merge_cells, adjoin=False,
names=False)
level_lengths = get_level_lengths(level_strs)
coloffset = 0
lnum = 0
if self.index and isinstance(self.df.index, ABCMultiIndex):
coloffset = len(self.df.index[0]) - 1
if self.merge_cells:
            # Format multi-index as merged cells.
for lnum in range(len(level_lengths)):
name = columns.names[lnum]
yield ExcelCell(lnum, coloffset, name, self.header_style)
for lnum, (spans, levels, labels) in enumerate(zip(
level_lengths, columns.levels, columns.labels)):
values = levels.take(labels)
for i in spans:
if spans[i] > 1:
yield ExcelCell(lnum, coloffset + i + 1, values[i],
self.header_style, lnum,
coloffset + i + spans[i])
else:
yield ExcelCell(lnum, coloffset + i + 1, values[i],
self.header_style)
else:
# Format in legacy format with dots to indicate levels.
for i, values in enumerate(zip(*level_strs)):
v = ".".join(map(pprint_thing, values))
yield ExcelCell(lnum, coloffset + i + 1, v, self.header_style)
self.rowcounter = lnum
def _format_header_regular(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
coloffset = 0
if self.index:
coloffset = 1
if isinstance(self.df.index, ABCMultiIndex):
coloffset = len(self.df.index[0])
colnames = self.columns
if has_aliases:
if len(self.header) != len(self.columns):
raise ValueError('Writing {cols} cols but got {alias} '
'aliases'.format(cols=len(self.columns),
alias=len(self.header)))
else:
colnames = self.header
for colindex, colname in enumerate(colnames):
yield ExcelCell(self.rowcounter, colindex + coloffset, colname,
self.header_style)
def _format_header(self):
if isinstance(self.columns, ABCMultiIndex):
gen = self._format_header_mi()
else:
gen = self._format_header_regular()
gen2 = ()
if self.df.index.names:
row = [x if x is not None else ''
for x in self.df.index.names] + [''] * len(self.columns)
if reduce(lambda x, y: x and y, map(lambda x: x != '', row)):
gen2 = (ExcelCell(self.rowcounter, colindex, val,
self.header_style)
for colindex, val in enumerate(row))
self.rowcounter += 1
return itertools.chain(gen, gen2)
def _format_body(self):
if isinstance(self.df.index, ABCMultiIndex):
return self._format_hierarchical_rows()
else:
return self._format_regular_rows()
def _format_regular_rows(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
self.rowcounter += 1
# output index and index_label?
if self.index:
# check aliases
# if list only take first as this is not a MultiIndex
if (self.index_label and
isinstance(self.index_label, (list, tuple, np.ndarray,
Index))):
index_label = self.index_label[0]
# if string good to go
elif self.index_label and isinstance(self.index_label, str):
index_label = self.index_label
else:
index_label = self.df.index.names[0]
if isinstance(self.columns, ABCMultiIndex):
self.rowcounter += 1
if index_label and self.header is not False:
yield ExcelCell(self.rowcounter - 1, 0, index_label,
self.header_style)
# write index_values
index_values = self.df.index
if isinstance(self.df.index, ABCPeriodIndex):
index_values = self.df.index.to_timestamp()
for idx, idxval in enumerate(index_values):
yield ExcelCell(self.rowcounter + idx, 0, idxval,
self.header_style)
coloffset = 1
else:
coloffset = 0
for cell in self._generate_body(coloffset):
yield cell
def _format_hierarchical_rows(self):
has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
if has_aliases or self.header:
self.rowcounter += 1
gcolidx = 0
if self.index:
index_labels = self.df.index.names
# check for aliases
if (self.index_label and
isinstance(self.index_label, (list, tuple, np.ndarray,
Index))):
index_labels = self.index_label
# MultiIndex columns require an extra row
# with index names (blank if None) for
            # unambiguous round-trip, unless not merging,
# in which case the names all go on one row Issue #11328
if isinstance(self.columns, ABCMultiIndex) and self.merge_cells:
self.rowcounter += 1
# if index labels are not empty go ahead and dump
if com._any_not_none(*index_labels) and self.header is not False:
for cidx, name in enumerate(index_labels):
yield ExcelCell(self.rowcounter - 1, cidx, name,
self.header_style)
if self.merge_cells:
# Format hierarchical rows as merged cells.
level_strs = self.df.index.format(sparsify=True, adjoin=False,
names=False)
level_lengths = get_level_lengths(level_strs)
for spans, levels, labels in zip(level_lengths,
self.df.index.levels,
self.df.index.labels):
values = levels.take(labels,
allow_fill=levels._can_hold_na,
fill_value=True)
for i in spans:
if spans[i] > 1:
yield ExcelCell(self.rowcounter + i, gcolidx,
values[i], self.header_style,
self.rowcounter + i + spans[i] - 1,
gcolidx)
else:
yield ExcelCell(self.rowcounter + i, gcolidx,
values[i], self.header_style)
gcolidx += 1
else:
# Format hierarchical rows with non-merged values.
for indexcolvals in zip(*self.df.index):
for idx, indexcolval in enumerate(indexcolvals):
yield ExcelCell(self.rowcounter + idx, gcolidx,
indexcolval, self.header_style)
gcolidx += 1
for cell in self._generate_body(gcolidx):
yield cell
def _generate_body(self, coloffset):
if self.styler is None:
styles = None
else:
styles = self.styler._compute().ctx
if not styles:
styles = None
xlstyle = None
# Write the body of the frame data series by series.
for colidx in range(len(self.columns)):
series = self.df.iloc[:, colidx]
for i, val in enumerate(series):
if styles is not None:
xlstyle = self.style_converter(';'.join(styles[i, colidx]))
yield ExcelCell(self.rowcounter + i, colidx + coloffset, val,
xlstyle)
def get_formatted_cells(self):
for cell in itertools.chain(self._format_header(),
self._format_body()):
cell.val = self._format_value(cell.val)
yield cell
def write(self, writer, sheet_name='Sheet1', startrow=0,
startcol=0, freeze_panes=None, engine=None):
"""
writer : string or ExcelWriter object
File path or existing ExcelWriter
sheet_name : string, default 'Sheet1'
Name of sheet which will contain DataFrame
        startrow : int, default 0
            upper left cell row to dump data frame
        startcol : int, default 0
            upper left cell column to dump data frame
freeze_panes : tuple of integer (length 2), default None
Specifies the one-based bottommost row and rightmost column that
is to be frozen
engine : string, default None
write engine to use if writer is a path - you can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``,
and ``io.excel.xlsm.writer``.
"""
from pandas.io.excel import ExcelWriter
from pandas.io.common import _stringify_path
if isinstance(writer, ExcelWriter):
need_save = False
else:
writer = ExcelWriter(_stringify_path(writer), engine=engine)
need_save = True
formatted_cells = self.get_formatted_cells()
writer.write_cells(formatted_cells, sheet_name,
startrow=startrow, startcol=startcol,
freeze_panes=freeze_panes)
if need_save:
writer.save()
| bsd-3-clause |
belltailjp/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical of
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
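# --- Added sketch: what ``calibration_curve`` computes, by hand --------------
# A minimal, hedged illustration (not part of the original example): predicted
# probabilities are binned and, per bin, the mean prediction is compared with
# the empirical fraction of positives. The synthetic numbers below are
# illustrative assumptions; the data are perfectly calibrated by construction.
import numpy as np

rng = np.random.RandomState(0)
p_hat = rng.uniform(size=1000)                           # fake predicted probabilities
y_true = (rng.uniform(size=1000) < p_hat).astype(int)    # positive with probability p_hat

bins = np.linspace(0.0, 1.0, 11)
bin_ids = np.digitize(p_hat, bins[1:-1])                 # 10 equal-width bins
for b in range(10):
    mask = bin_ids == b
    if mask.any():
        print("bin %d: mean predicted %.2f, fraction positive %.2f"
              % (b, p_hat[mask].mean(), y_true[mask].mean()))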
| bsd-3-clause |
matthewzhenggong/fiwt | XbeeZBS2Test/setup_CmdWifi.py | 1 | 2861 |
from distutils.core import setup
import py2exe
import sys
import os
manifest='''<?xml version='1.0' encoding='UTF-8' standalone='yes'?>
<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='asInvoker' uiAccess='false' />
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type='win32'
name='Microsoft.VC90.CRT'
version='9.0.21022.8'
processorArchitecture='*'
publicKeyToken='1fc8b3b9a1e18e3b' />
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="*"
publicKeyToken="6595b64144ccf1df"
language="*" />
</dependentAssembly>
</dependency>
</assembly>
'''
# Remove the build folder, a bit slower but ensures that build contains the latest
import shutil
shutil.rmtree("build", ignore_errors=True)
shutil.rmtree("dist", ignore_errors=True)
# my setup.py is based on one generated with gui2exe, so data_files is done a bit differently
includes = []
excludes = ['_gtkagg', '_tkagg', 'bsddb', 'curses', 'pywin.debugger',
'pywin.debugger.dbgcon', 'pywin.dialogs', 'tcl',
'Tkconstants', 'Tkinter', 'pydoc', 'doctest', 'test', 'sqlite3'
]
packages = []
dll_excludes = ['libgdk-win32-2.0-0.dll', 'libgobject-2.0-0.dll', 'tcl84.dll',
'tk84.dll']
icon_resources = []
bitmap_resources = []
other_resources = []
# add the mpl mpl-data folder and rc file
#import matplotlib as mpl
#data_files = mpl.get_py2exe_datafiles()
data_files = []
options = {"py2exe":
{ "compressed": 2,
"optimize": 1,
"includes": includes,
"excludes": excludes,
"packages": packages,
"dll_excludes": dll_excludes,
"bundle_files": 1,
"dist_dir": 'dist',
"xref": False,
"skip_archive": False,
"ascii": False,
"custom_boot_script": '',
}
}
setup(
version = "1.0.0",
description = "Command, Sample and Control ",
name = "CommandWiFi",
options = options,
zipfile=None,
data_files=data_files,
windows=[{
'script':"CommandWiFi.py",
'other_resources' : [(24, 1, manifest)]
}]
)
try :
os.remove('CommandWiFi.exe')
except :
pass
shutil.copy('dist\\CommandWiFi.exe', '.')
shutil.rmtree("build", ignore_errors=True)
shutil.rmtree("dist", ignore_errors=True)
| lgpl-3.0 |
pombo-lab/gamtools | lib/gamtools/qc/merge.py | 1 | 1933 | """
===================
The qc.merge module
===================
The qc.merge module contains functions for merging separate qc stats files
into a single table.
"""
import pandas as pd
def merge_stats(input_stats_files, output_merged_file):
"""
Merge a list of dataframes together based on their index columns.
:param list input_stats_files: String paths to input dataframes.
:param str output_merged_file: Path to save output dataframe.
"""
first_file = input_stats_files[0]
base = pd.read_csv(first_file, delim_whitespace=True, index_col=0)
for stats_file_path in input_stats_files[1:]:
stats_file = pd.read_csv(stats_file_path, delim_whitespace=True, index_col=0)
base = pd.merge(base, stats_file, left_index=True, right_index=True)
final_df = check_index_column(input_stats_files, base)
final_df.to_csv(output_merged_file, sep='\t')
def check_index_column(input_stats_files, merged_df):
"""
After merging several stats files together, check that the index column
(i.e. the sample name) has not been converted to an integer or a float.
:param list input_stats_files: String paths to input dataframes.
:param merged_df: Pandas dataframe of merged statistics files.
:returns: Pandas dataframe with corrected index column.
"""
try:
first_col = pd.read_csv(input_stats_files[0], delim_whitespace=True, dtype=str).iloc[:, 0]
except pd.io.common.EmptyDataError:
input_stats_files[0].seek(0)
first_col = pd.read_csv(input_stats_files[0], delim_whitespace=True, dtype=str).iloc[:, 0]
if not all(merged_df.index.get_level_values(0) == first_col):
merged_df.index = first_col
return merged_df
def merge_stats_from_doit(dependencies, targets):
"""
Wrapper function to call merge_stats from argparse.
"""
assert len(targets) == 1
merge_stats(list(dependencies), targets[0])
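if __name__ == '__main__':
    # Added usage sketch (not part of the original module): merge two small
    # whitespace-delimited stats tables supplied as in-memory file objects and
    # write the result to stdout. The column and sample names below are
    # illustrative assumptions.
    import io
    import sys
    stats_a = io.StringIO(u"Sample Reads\nNGS1 100\nNGS2 200\n")
    stats_b = io.StringIO(u"Sample Contacts\nNGS1 10\nNGS2 20\n")
    merge_stats([stats_a, stats_b], sys.stdout)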
| apache-2.0 |
loli/sklearn-ensembletrees | sklearn/feature_extraction/hashing.py | 29 | 5648 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : NumPy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
        as the dtype argument. Do not set this to bool, np.bool_ or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
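if __name__ == "__main__":
    # Added usage sketch (not part of the original module; run it via
    # ``python -m sklearn.feature_extraction.hashing`` because of the relative
    # imports above). The feature names and n_features value are illustrative
    # assumptions.
    hasher = FeatureHasher(n_features=16, input_type="dict")
    X = hasher.transform([{"cat": 1, "dog": 2}, {"cat": 3, "fish": 1}])
    print(X.shape)      # (2, 16)
    print(X.toarray())  # mostly zeros; some signs may be flipped by the hash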
| bsd-3-clause |
RichardLeeK/MachineLearning | MachineLearning/SignalImage/main.py | 1 | 1948 | import autoencoder as ae
import manifold as mf
import process as pc
import matplotlib.pyplot as plt
import datetime
if __name__ == '__main__':
path = 'data/interpolate_gos/'
signals, filenames = ae.load_data(path)
gos_map = pc.gos_loader()
total_image = []
total_signal = []
file_len_map = {}
bef_cnt = 0
for i in range(len(filenames)):
filename = filenames[i].split('_')[0]
imgs = ae.signal_to_img(signals[i])
total_image.extend(imgs)
total_signal.extend(signals[i])
file_len_map[filename] = [bef_cnt, bef_cnt + len(imgs)]
bef_cnt += len(imgs)
"""
total_rep_imgs = ae.autoencoding_cnn(total_image, total_image, img_dim=128, encoding_dim=32)
"""
gos_color = ['b', 'g', 'r', 'c', 'm']
pen = open('img/label_gos.csv', 'w')
cnt = 0
cp_bef = datetime.datetime.now()
for k, v in file_len_map.items():
for i in range(v[0], v[1]):
plt.figure(1)
plt.imshow(total_image[i].reshape(128, 128))
plt.savefig('img/signal_classification/ori/'+k+'_'+str(i)+'.png')
plt.cla(); plt.clf()
pen.write(str(i)+'\n')
"""
plt.figure(1)
plt.imshow(total_rep_imgs[i].reshape(128, 128))
plt.savefig('img/signal_100/rep/'+k+'_'+str(i-v[0])+'.png')
"""
if cnt % 10 == 0:
cp_aft = datetime.datetime.now()
print('P: ('+str(cnt)+'/'+str(len(total_image))+') ' + str(cp_aft-cp_bef))
cp_bef = cp_aft
cnt += 1
"""
Y_o = mf.tSNELearning(total_image, n_component=5, init='pca', random_state=0)
Y_r = mf.tSNELearning(total_rep_imgs, n_component=5, init='pca', random_state=0)
fig = plt.figure(2)
for k, v in file_len_map.items():
for i in range(v[0], v[1]):
plt.scatter(Y_o[i][0], Y_o[i][1], color=gos_color[gos_map[k]])
fig2 = plt.figure(2)
for k, v in file_len_map.items():
    for i in range(v[0], v[1]):
plt.scatter(Y_r[i][0], Y_r[i][1], color=gos_color[gos_map[k]])
plt.show()
""" | mit |
GGiecold/PySCUBA | src/PySCUBA/Gap_stats.py | 1 | 3952 | #!/usr/bin/env python
# PySCUBA/src/PySCUBA/Gap_stats.py
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: [email protected]; [email protected]
from collections import defaultdict
import numpy as np
from sklearn.cluster import k_means
from sklearn.cluster import MiniBatchKMeans
__all__ = ['gap_stats']
def KMEANS(data, k):
if data.shape[0] < 20000:
centroids, cluster_IDs, _ = k_means(data, k, init = 'k-means++', precompute_distances = 'auto', n_init = 20, max_iter = 200)
else:
mbkm = MiniBatchKMeans(k, 'k-means++', max_iter = 100, batch_size = data.shape[0] / k, n_init = 20)
mbkm.fit(data)
centroids = mbkm.cluster_centers_
cluster_IDs = mbkm.labels_
return centroids, cluster_IDs
def box_corners(data):
mins = np.amin(data, axis = 0)
maxs = np.amax(data, axis = 0)
return zip(mins, maxs)
def one_to_max(array_in):
"""Alter a vector of cluster labels to a dense mapping.
    Given how this function is called within this module, one_to_max relies
    on the assumption that array_in does not contain any NaN entries.
Parameters
----------
array_in : a list or one-dimensional array
The list of cluster IDs to be processed.
Returns
-------
result : one-dimensional array
A massaged version of the input vector of cluster identities.
"""
x = np.asanyarray(array_in)
N_in = x.size
array_in = np.reshape(x, N_in)
sorted_array = np.sort(array_in)
sorting_indices = np.argsort(array_in)
last = np.nan
current_index = -1
for i in xrange(N_in):
if last != sorted_array[i] or np.isnan(last):
last = sorted_array[i]
current_index += 1
sorted_array[i] = current_index
result = np.empty(N_in, dtype = int)
result[sorting_indices] = sorted_array
return result
def W_k(data, centroids, cluster_IDs):
cluster_IDs = one_to_max(cluster_IDs)
N_clusters = int(np.amax(cluster_IDs) + 1)
assert len(centroids) == N_clusters
W_k = 0
for i in xrange(N_clusters):
samples_in_i = np.where(cluster_IDs == i)[0]
if samples_in_i.size > 0:
W_k += np.sum(np.linalg.norm(data[j] - centroids[i]) ** 2 for j in samples_in_i)
return W_k
def gap_stats(data, min_k = 1, max_k = 10):
B = 100
assert isinstance(min_k, int) or type(min_k) is np.int_
assert isinstance(max_k, int) or type(max_k) is np.int_
assert (min_k > 0) and (max_k > 0)
if min_k == max_k:
return None, None, None
k_list = np.arange(min_k, max_k + 1)
min_maxs = box_corners(data)
log_W_list = []
E_log_W_list = []
s_k_list = []
for k in k_list:
centroids, cluster_IDs = KMEANS(data, k)
log_W_list.append(np.log(W_k(data, centroids, cluster_IDs)))
log_W_k_b_list = np.zeros(B, dtype = float)
for b in xrange(B):
uniform_data = np.zeros((data.shape[0], data.shape[1]), dtype = float)
for i in xrange(data.shape[1]):
v = np.random.uniform(low = min_maxs[i][0], high = min_maxs[i][1], size = data.shape[0])
uniform_data[:, i] = v
centroids, cluster_IDs = KMEANS(uniform_data, k)
log_W_k_b_list[b] = np.log(W_k(uniform_data, centroids, cluster_IDs))
E_log_W_list.append((np.sum(log_W_k_b_list) + 0.0) / B)
s_k = np.sum((log_W_k_b_list - E_log_W_list[-1]) ** 2) / B
s_k = np.sqrt(s_k)
s_k_list.append(s_k)
log_W_list = np.asarray(log_W_list, dtype = float)
E_log_W_list = np.asarray(E_log_W_list, dtype = float)
s_k_list = np.asarray(s_k_list, dtype = float)
    s_k_list *= np.sqrt(1 + 1 / (B + 0.0))
return log_W_list, E_log_W_list, s_k_list
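if __name__ == '__main__':
    # Added usage sketch (not part of the original module): run the gap
    # statistic on a small synthetic mixture of two Gaussian blobs and apply
    # the usual selection rule Gap(k) >= Gap(k+1) - s_{k+1}, where
    # Gap(k) = E_log_W[k] - log_W[k]. The blob parameters are illustrative
    # assumptions, and the B = 100 reference datasets make this take a moment.
    rng = np.random.RandomState(0)
    blob_1 = rng.normal(loc=0.0, scale=0.5, size=(50, 2))
    blob_2 = rng.normal(loc=5.0, scale=0.5, size=(50, 2))
    data = np.vstack([blob_1, blob_2])
    log_W, E_log_W, s_k = gap_stats(data, min_k=1, max_k=4)
    gap = E_log_W - log_W
    for k in range(1, 4):
        if gap[k - 1] >= gap[k] - s_k[k]:
            print('estimated number of clusters: %d' % k)
            break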
| mit |
JamesClough/dagology | examples/diagrams/diagram_utils.py | 1 | 1048 | # These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
# Transitive reduction in case we're using old networkx version
import networkx as nx
def transitive_reduction(G):
TR = nx.DiGraph()
TR.add_nodes_from(G.nodes())
for u in G:
u_edges = set(G[u])
for v in G[u]:
u_edges -= {y for x, y in nx.dfs_edges(G, v)}
TR.add_edges_from((u,v) for v in u_edges)
return TR
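if __name__ == '__main__':
    # Added usage sketch (not part of the original module): the edge (1, 3) is
    # implied by 1 -> 2 -> 3, so the transitive reduction drops it. The small
    # example graph is an illustrative assumption.
    G = nx.DiGraph([(1, 2), (2, 3), (1, 3)])
    TR = transitive_reduction(G)
    print(sorted(TR.edges()))   # expected: [(1, 2), (2, 3)]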
| mit |
polyaxon/polyaxon | examples/in_cluster/sklearn/iris/app.py | 1 | 1445 | import streamlit as st
import pandas as pd
import joblib
import argparse
from PIL import Image
def load_model(model_path: str):
    # Use a context manager so the file handle is closed after loading.
    with open(model_path, "rb") as model_file:
        return joblib.load(model_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--model-path',
type=str,
)
args = parser.parse_args()
setosa = Image.open("images/iris-setosa.png")
versicolor = Image.open("images/iris-versicolor.png")
virginica = Image.open("images/iris-virginica.png")
classifier = load_model(args.model_path)
print(classifier)
st.title("Iris flower species Classification")
st.sidebar.title("Features")
parameter_list = [
"Sepal length (cm)",
"Sepal Width (cm)",
"Petal length (cm)",
"Petal Width (cm)"
]
sliders = []
for parameter, parameter_df in zip(parameter_list, ['5.2', '3.2', '4.2', '1.2']):
values = st.sidebar.slider(
label=parameter,
key=parameter,
value=float(parameter_df),
min_value=0.0,
max_value=8.0,
step=0.1
)
sliders.append(values)
input_variables = pd.DataFrame([sliders], columns=parameter_list)
prediction = classifier.predict(input_variables)
if prediction == 0:
st.image(setosa)
elif prediction == 1:
st.image(versicolor)
else:
st.image(virginica)
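# --- Added note (not part of the original app) -------------------------------
# A hedged sketch of how the ``--model-path`` artifact loaded above might be
# produced; the estimator choice and file name are illustrative assumptions,
# not the project's actual training code:
#
#     from sklearn.datasets import load_iris
#     from sklearn.linear_model import LogisticRegression
#     import joblib
#
#     iris = load_iris()
#     clf = LogisticRegression(max_iter=200).fit(iris.data, iris.target)
#     joblib.dump(clf, "model.joblib")
#
# The app is then launched with something like
#     streamlit run app.py -- --model-path model.joblib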
| apache-2.0 |
IndraVikas/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 130 | 6059 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
| bsd-3-clause |
janusnic/21v-python | unit_20/matplotlib/date_demo1.py | 2 | 1503 | #!/usr/bin/env python
"""
Show how to make date plots in matplotlib using date tick locators and
formatters. See major_minor_demo1.py for more information on
controlling major and minor ticks
All matplotlib date plotting is done by converting date instances into
days since 0001-01-01 UTC. The conversion, tick locating and
formatting are done behind the scenes, so this is mostly transparent to
you. The dates module provides several converter functions, such as date2num
and num2date.
This example requires an active internet connection since it uses
yahoo finance to get the data for plotting
"""
import matplotlib.pyplot as plt
from matplotlib.finance import quotes_historical_yahoo_ochl
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter
import datetime
date1 = datetime.date(1995, 1, 1)
date2 = datetime.date(2004, 4, 12)
years = YearLocator() # every year
months = MonthLocator() # every month
yearsFmt = DateFormatter('%Y')
quotes = quotes_historical_yahoo_ochl('INTC', date1, date2)
if len(quotes) == 0:
raise SystemExit
dates = [q[0] for q in quotes]
opens = [q[1] for q in quotes]
fig, ax = plt.subplots()
ax.plot_date(dates, opens, '-')
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
ax.autoscale_view()
# format the coords message box
def price(x):
return '$%1.2f' % x
ax.fmt_xdata = DateFormatter('%Y-%m-%d')
ax.fmt_ydata = price
ax.grid(True)
fig.autofmt_xdate()
plt.show()
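# --- Added offline variant (sketch; not part of the original demo) -----------
# The Yahoo download above needs network access; the same locator/formatter
# mechanics can be exercised offline with synthetic data. The dates and values
# below are illustrative assumptions:
#
#     import numpy as np
#     dates = [datetime.date(2000, 1, 1) + datetime.timedelta(days=7 * i)
#              for i in range(520)]
#     values = np.cumsum(np.random.randn(len(dates)))
#     fig, ax = plt.subplots()
#     ax.plot_date(dates, values, '-')
#     ax.xaxis.set_major_locator(YearLocator())
#     ax.xaxis.set_major_formatter(DateFormatter('%Y'))
#     ax.xaxis.set_minor_locator(MonthLocator())
#     fig.autofmt_xdate()
#     plt.show()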
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/delaunay/triangulate.py | 1 | 7756 | import warnings
# 2.3 compatibility
try:
set
except NameError:
import sets
set = sets.Set
import numpy as np
from matplotlib._delaunay import delaunay
from interpolate import LinearInterpolator, NNInterpolator
__all__ = ['Triangulation', 'DuplicatePointWarning']
class DuplicatePointWarning(RuntimeWarning):
"""Duplicate points were passed in to the triangulation routine.
"""
class Triangulation(object):
"""A Delaunay triangulation of points in a plane.
Triangulation(x, y)
x, y -- the coordinates of the points as 1-D arrays of floats
Let us make the following definitions:
npoints = number of points input
nedges = number of edges in the triangulation
ntriangles = number of triangles in the triangulation
point_id = an integer identifying a particular point (specifically, an
index into x and y), range(0, npoints)
edge_id = an integer identifying a particular edge, range(0, nedges)
triangle_id = an integer identifying a particular triangle
range(0, ntriangles)
Attributes: (all should be treated as read-only to maintain consistency)
x, y -- the coordinates of the points as 1-D arrays of floats.
circumcenters -- (ntriangles, 2) array of floats giving the (x,y)
coordinates of the circumcenters of each triangle (indexed by a
triangle_id).
edge_db -- (nedges, 2) array of point_id's giving the points forming
each edge in no particular order; indexed by an edge_id.
triangle_nodes -- (ntriangles, 3) array of point_id's giving the points
forming each triangle in counter-clockwise order; indexed by a
triangle_id.
triangle_neighbors -- (ntriangles, 3) array of triangle_id's giving the
neighboring triangle; indexed by a triangle_id.
The value can also be -1 meaning that that edge is on the convex hull of
the points and there is no neighbor on that edge. The values are ordered
such that triangle_neighbors[tri, i] corresponds with the edge
*opposite* triangle_nodes[tri, i]. As such, these neighbors are also in
counter-clockwise order.
hull -- list of point_id's giving the nodes which form the convex hull
of the point set. This list is sorted in counter-clockwise order.
"""
def __init__(self, x, y):
self.x = np.asarray(x, dtype=np.float64)
self.y = np.asarray(y, dtype=np.float64)
if self.x.shape != self.y.shape or len(self.x.shape) != 1:
raise ValueError("x,y must be equal-length 1-D arrays")
self.old_shape = self.x.shape
j_unique = self._collapse_duplicate_points()
if j_unique.shape != self.x.shape:
warnings.warn(
"Input data contains duplicate x,y points; some values are ignored.",
DuplicatePointWarning,
)
self.j_unique = j_unique
self.x = self.x[self.j_unique]
self.y = self.y[self.j_unique]
else:
self.j_unique = None
self.circumcenters, self.edge_db, self.triangle_nodes, \
self.triangle_neighbors = delaunay(self.x, self.y)
self.hull = self._compute_convex_hull()
def _collapse_duplicate_points(self):
"""Generate index array that picks out unique x,y points.
This appears to be required by the underlying delaunay triangulation
code.
"""
# Find the indices of the unique entries
j_sorted = np.lexsort(keys=(self.x, self.y))
mask_unique = np.hstack([
True,
(np.diff(self.x[j_sorted]) != 0) | (np.diff(self.y[j_sorted]) != 0),
])
return j_sorted[mask_unique]
def _compute_convex_hull(self):
"""Extract the convex hull from the triangulation information.
The output will be a list of point_id's in counter-clockwise order
forming the convex hull of the data set.
"""
border = (self.triangle_neighbors == -1)
edges = {}
edges.update(dict(zip(self.triangle_nodes[border[:,0]][:,1],
self.triangle_nodes[border[:,0]][:,2])))
edges.update(dict(zip(self.triangle_nodes[border[:,1]][:,2],
self.triangle_nodes[border[:,1]][:,0])))
edges.update(dict(zip(self.triangle_nodes[border[:,2]][:,0],
self.triangle_nodes[border[:,2]][:,1])))
# Take an arbitrary starting point and its subsequent node
hull = list(edges.popitem())
while edges:
hull.append(edges.pop(hull[-1]))
# hull[-1] == hull[0], so remove hull[-1]
hull.pop()
return hull
def linear_interpolator(self, z, default_value=np.nan):
"""Get an object which can interpolate within the convex hull by
assigning a plane to each triangle.
z -- an array of floats giving the known function values at each point
in the triangulation.
"""
z = np.asarray(z, dtype=np.float64)
if z.shape != self.old_shape:
raise ValueError("z must be the same shape as x and y")
if self.j_unique is not None:
z = z[self.j_unique]
return LinearInterpolator(self, z, default_value)
def nn_interpolator(self, z, default_value=np.nan):
"""Get an object which can interpolate within the convex hull by
the natural neighbors method.
z -- an array of floats giving the known function values at each point
in the triangulation.
"""
z = np.asarray(z, dtype=np.float64)
if z.shape != self.old_shape:
raise ValueError("z must be the same shape as x and y")
if self.j_unique is not None:
z = z[self.j_unique]
return NNInterpolator(self, z, default_value)
def prep_extrapolator(self, z, bbox=None):
if bbox is None:
bbox = (self.x[0], self.x[0], self.y[0], self.y[0])
minx, maxx, miny, maxy = np.asarray(bbox, np.float64)
minx = min(minx, np.minimum.reduce(self.x))
miny = min(miny, np.minimum.reduce(self.y))
maxx = max(maxx, np.maximum.reduce(self.x))
maxy = max(maxy, np.maximum.reduce(self.y))
M = max((maxx-minx)/2, (maxy-miny)/2)
midx = (minx + maxx)/2.0
midy = (miny + maxy)/2.0
xp, yp= np.array([[midx+3*M, midx, midx-3*M],
[midy, midy+3*M, midy-3*M]])
x1 = np.hstack((self.x, xp))
y1 = np.hstack((self.y, yp))
newtri = self.__class__(x1, y1)
# do a least-squares fit to a plane to make pseudo-data
xy1 = np.ones((len(self.x), 3), np.float64)
xy1[:,0] = self.x
xy1[:,1] = self.y
from numpy.dual import lstsq
c, res, rank, s = lstsq(xy1, z)
zp = np.hstack((z, xp*c[0] + yp*c[1] + c[2]))
return newtri, zp
def nn_extrapolator(self, z, bbox=None, default_value=np.nan):
newtri, zp = self.prep_extrapolator(z, bbox)
return newtri.nn_interpolator(zp, default_value)
def linear_extrapolator(self, z, bbox=None, default_value=np.nan):
newtri, zp = self.prep_extrapolator(z, bbox)
return newtri.linear_interpolator(zp, default_value)
def node_graph(self):
"""Return a graph of node_id's pointing to node_id's.
The arcs of the graph correspond to the edges in the triangulation.
{node_id: set([node_id, ...]), ...}
"""
g = {}
for i, j in self.edge_db:
s = g.setdefault(i, set())
s.add(j)
s = g.setdefault(j, set())
s.add(i)
return g
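if __name__ == '__main__':
    # Added usage sketch (not part of the original module): triangulate a few
    # random points and inspect the resulting structures. The sample size is
    # an illustrative assumption.
    np.random.seed(0)
    x = np.random.uniform(0.0, 1.0, 25)
    y = np.random.uniform(0.0, 1.0, 25)
    tri = Triangulation(x, y)
    print('%d triangles, %d edges' % (len(tri.triangle_nodes), len(tri.edge_db)))
    print('convex hull (point ids): %s' % tri.hull)
    # Build a natural-neighbor interpolator for z = x * y over the triangulation.
    interp = tri.nn_interpolator(x * y)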
| gpl-2.0 |
ahnitz/mpld3 | mpld3/mplexporter/renderers/vega_renderer.py | 54 | 5284 | import warnings
import json
import random
from .base import Renderer
from ..exporter import Exporter
class VegaRenderer(Renderer):
def open_figure(self, fig, props):
self.props = props
self.figwidth = int(props['figwidth'] * props['dpi'])
self.figheight = int(props['figheight'] * props['dpi'])
self.data = []
self.scales = []
self.axes = []
self.marks = []
def open_axes(self, ax, props):
if len(self.axes) > 0:
warnings.warn("multiple axes not yet supported")
self.axes = [dict(type="x", scale="x", ticks=10),
dict(type="y", scale="y", ticks=10)]
self.scales = [dict(name="x",
domain=props['xlim'],
type="linear",
range="width",
),
dict(name="y",
domain=props['ylim'],
type="linear",
range="height",
),]
def draw_line(self, data, coordinates, style, label, mplobj=None):
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({'name': dataname,
'values': [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append({'type': 'line',
'from': {'data': dataname},
'properties': {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"stroke": {"value": style['color']},
"strokeOpacity": {"value": style['alpha']},
"strokeWidth": {"value": style['linewidth']},
}
}
})
def draw_markers(self, data, coordinates, style, label, mplobj=None):
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
dataname = "table{0:03d}".format(len(self.data) + 1)
# TODO: respect the other style settings
self.data.append({'name': dataname,
'values': [dict(x=d[0], y=d[1]) for d in data]})
self.marks.append({'type': 'symbol',
'from': {'data': dataname},
'properties': {
"enter": {
"interpolate": {"value": "monotone"},
"x": {"scale": "x", "field": "data.x"},
"y": {"scale": "y", "field": "data.y"},
"fill": {"value": style['facecolor']},
"fillOpacity": {"value": style['alpha']},
"stroke": {"value": style['edgecolor']},
"strokeOpacity": {"value": style['alpha']},
"strokeWidth": {"value": style['edgewidth']},
}
}
})
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
if text_type == 'xlabel':
self.axes[0]['title'] = text
elif text_type == 'ylabel':
self.axes[1]['title'] = text
class VegaHTML(object):
def __init__(self, renderer):
self.specification = dict(width=renderer.figwidth,
height=renderer.figheight,
data=renderer.data,
scales=renderer.scales,
axes=renderer.axes,
marks=renderer.marks)
def html(self):
"""Build the HTML representation for IPython."""
id = random.randint(0, 2 ** 16)
html = '<div id="vis%d"></div>' % id
html += '<script>\n'
html += VEGA_TEMPLATE % (json.dumps(self.specification), id)
html += '</script>\n'
return html
def _repr_html_(self):
return self.html()
def fig_to_vega(fig, notebook=False):
"""Convert a matplotlib figure to vega dictionary
    If notebook=True, return an object that will display in a notebook;
    otherwise, return an HTML string.
"""
renderer = VegaRenderer()
Exporter(renderer).run(fig)
vega_html = VegaHTML(renderer)
if notebook:
return vega_html
else:
return vega_html.html()
VEGA_TEMPLATE = """
( function() {
var _do_plot = function() {
if ( (typeof vg == 'undefined') && (typeof IPython != 'undefined')) {
$([IPython.events]).on("vega_loaded.vincent", _do_plot);
return;
}
vg.parse.spec(%s, function(chart) {
chart({el: "#vis%d"}).update();
});
};
_do_plot();
})();
"""
| bsd-3-clause |
muku42/bokeh | bokeh/charts/builder/dot_builder.py | 43 | 6160 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Dot class, which lets you build your Dot charts by just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
try:
import pandas as pd
except ImportError:
pd = None
from ..utils import chunk, cycle_colors, make_scatter
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, FactorRange, GlyphRenderer, Range1d
from ...models.glyphs import Segment
from ...properties import Any, Bool, Either, List
def Dot(values, cat=None, stem=True, xscale="categorical", yscale="linear",
xgrid=False, ygrid=True, **kws):
""" Create a dot chart using :class:`DotBuilder <bokeh.charts.builder.dot_builder.DotBuilder>`
to render the geometry from values and cat.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
cat (list or bool, optional): list of string representing the categories.
Defaults to None.
In addition the the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
from bokeh.charts import Dot, output_file, show
# dict, OrderedDict, lists, arrays and DataFrames are valid inputs
xyvalues = OrderedDict()
xyvalues['python']=[2, 5]
xyvalues['pypy']=[12, 40]
xyvalues['jython']=[22, 30]
dot = Dot(xyvalues, ['cpu1', 'cpu2'], title='dots')
output_file('dot.html')
show(dot)
"""
return create_and_build(
DotBuilder, values, cat=cat, stem=stem, xscale=xscale, yscale=yscale,
xgrid=xgrid, ygrid=ygrid, **kws
)
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class DotBuilder(Builder):
"""This is the Dot class and it is in charge of plotting Dot chart
in an easy and intuitive way.
Essentially, it provides a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (segments and circles) taking
the references from the source.
"""
cat = Either(Bool, List(Any), help="""
List of string representing the categories. (Defaults to None.)
""")
stem = Bool(True, help="""
        Whether to draw a stem from each dot to the axis.
""")
def _process_data(self):
"""Take the Dot data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the rect glyph inside the ``_yield_renderers`` method.
"""
if not self.cat:
self.cat = [str(x) for x in self._values.index]
self._data = dict(cat=self.cat, zero=np.zeros(len(self.cat)))
# list to save all the attributes we are going to create
# list to save all the groups available in the incoming input
# Grouping
self._groups.extend(self._values.keys())
step = np.linspace(0, 1.0, len(self._values.keys()) + 1, endpoint=False)
for i, (val, values) in enumerate(self._values.items()):
# original y value
self.set_and_get("", val, values)
# x value
cats = [c + ":" + str(step[i + 1]) for c in self.cat]
self.set_and_get("cat", val, cats)
# zeros
self.set_and_get("z_", val, np.zeros(len(values)))
# segment top y value
self.set_and_get("seg_top_", val, values)
def _set_sources(self):
"""Push the Dot data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = FactorRange(factors=self._source.data["cat"])
cat = [i for i in self._attr if not i.startswith(("cat",))]
end = 1.1 * max(max(self._data[i]) for i in cat)
self.y_range = Range1d(start=0, end=end)
def _yield_renderers(self):
"""Use the rect glyphs to display the bars.
Takes reference points from data loaded at the source and
renders circle glyphs (and segments) on the related
coordinates.
"""
self._tuples = list(chunk(self._attr, 4))
colors = cycle_colors(self._tuples, self.palette)
# quartet elements are: [data, cat, zeros, segment_top]
for i, quartet in enumerate(self._tuples):
            # draw the segment first so that the scatter glyph is placed on top of it
            # and the segment chunk does not show on top of the circle
if self.stem:
glyph = Segment(
x0=quartet[1], y0=quartet[2], x1=quartet[1], y1=quartet[3],
line_color="black", line_width=2
)
yield GlyphRenderer(data_source=self._source, glyph=glyph)
renderer = make_scatter(
self._source, quartet[1], quartet[0], 'circle',
colors[i - 1], line_color='black', size=15, fill_alpha=1.,
)
self._legends.append((self._groups[i], [renderer]))
yield renderer
| bsd-3-clause |
f3r/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 124 | 1877 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would cause division by zero in el()
cross(1.2)
l1_color = "navy"
l2_color = "c"
elastic_net_color = "darkorange"
lw = 2
plt.plot(xs, l1(xs), color=l1_color, label="L1", lw=lw)
plt.plot(xs, -1.0 * l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l1(xs), color=l1_color, lw=lw)
plt.plot(xs, l2(xs), color=l2_color, label="L2", lw=lw)
plt.plot(xs, -1.0 * l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l2(xs), color=l2_color, lw=lw)
plt.plot(xs, el(xs, alpha), color=elastic_net_color, label="Elastic Net", lw=lw)
plt.plot(xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
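# --- Added numeric sanity check (not part of the original example) -----------
# The closed forms above should trace the unit balls of the corresponding
# penalties: for L1 the plotted contour satisfies |w0| + |w1| = 1 and for L2 it
# satisfies w0**2 + w1**2 = 1. (The elastic-net contour mixes both and is not
# re-derived here.)
check_xs = np.linspace(0, 1, 7)
assert np.allclose(np.abs(check_xs) + np.abs(l1(check_xs)), 1.0)
assert np.allclose(check_xs ** 2 + l2(check_xs) ** 2, 1.0)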
| bsd-3-clause |
jinghaomiao/apollo | modules/tools/navigation/driving_behavior/path_plot.py | 6 | 1302 | #!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys
import matplotlib.pyplot as plt
fig = plt.figure()
ax = plt.subplot2grid((1, 1), (0, 0))
styles = ["b-", "r-", "y-"]
i = 0
for fn in sys.argv[1:]:
f = open(fn, 'r')
xs = []
ys = []
for line in f:
line = line.replace("\n", '')
data = line.split(',')
x = float(data[0])
y = float(data[1])
xs.append(x)
ys.append(y)
f.close()
si = i % len(styles)
ax.plot(xs, ys, styles[si], lw=3, alpha=0.8)
i += 1
ax.axis('equal')
plt.show()
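# --- Added usage note (not part of the original tool) ------------------------
# The script expects one or more CSV files whose rows are "x,y" pairs, e.g.
#     python path_plot.py recorded_path.csv planned_path.csv
# Each file is drawn with the next style in `styles`; the file names above are
# illustrative assumptions.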
| apache-2.0 |
glennq/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 38 | 11165 | # Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
from sklearn.utils.testing import SkipTest, assert_equal, assert_true
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
# Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask,
dtype=np.float64)
assert_true(A.dtype == np.float64)
def test_connect_regions():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
for thr in (50, 150):
mask = face > thr
graph = img_to_graph(face, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
mask = face > 50
graph = grid_to_graph(*face.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = face > 150
graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_face():
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
face = face.astype(np.float32)
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = (face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2]
+ face[1::2, 1::2])
face = face.astype(np.float32)
face /= 16.0
return face
def _orange_face(face=None):
face = _downsampled_face() if face is None else face
face_color = np.zeros(face.shape + (3,))
face_color[:, :, 0] = 256 - face
face_color[:, :, 1] = 256 - face / 2
face_color[:, :, 2] = 256 - face / 4
return face_color
def _make_images(face=None):
face = _downsampled_face() if face is None else face
# make a collection of faces
images = np.zeros((3,) + face.shape)
images[0] = face
images[1] = face + 1
images[2] = face + 2
return images
downsampled_face = _downsampled_face()
orange_face = _orange_face(downsampled_face)
face_collection = _make_images(downsampled_face)
def test_extract_patches_all():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
face = orange_face
i_h, i_w = face.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
face = downsampled_face
face = face[:, 32:97]
i_h, i_w = face.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(face, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
face = downsampled_face
i_h, i_w = face.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, face, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
face = downsampled_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_reconstruct_patches_perfect_color():
face = orange_face
p_h, p_w = 16, 16
patches = extract_patches_2d(face, (p_h, p_w))
face_reconstructed = reconstruct_from_patches_2d(patches, face.shape)
np.testing.assert_array_almost_equal(face, face_reconstructed)
def test_patch_extractor_fit():
faces = face_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(faces))
def test_patch_extractor_max_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(faces) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(faces) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
faces = face_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(faces)
assert_equal(patches.shape, (len(faces) * 100, 19, 25))
def test_patch_extractor_all_patches():
faces = face_collection
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
faces = _make_images(orange_face)
i_h, i_w = faces.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(faces)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
face = downsampled_face
i_h, i_w = face.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(face, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
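if __name__ == '__main__':
    # Added usage sketch (not part of the original test suite): the round trip
    # exercised by the tests above, on a tiny random image. The image and patch
    # shapes are illustrative assumptions.
    rng = np.random.RandomState(0)
    image = rng.rand(8, 8)
    patches = extract_patches_2d(image, (4, 4))
    print(patches.shape)                     # (25, 4, 4)
    rebuilt = reconstruct_from_patches_2d(patches, image.shape)
    print(np.allclose(image, rebuilt))       # True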
| bsd-3-clause |
facaiy/spark | python/pyspark/sql/tests/test_arrow.py | 4 | 19752 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import threading
import time
import unittest
import warnings
from pyspark.sql import Row
from pyspark.sql.types import *
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
from pyspark.util import _exception_message
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class ArrowTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
from datetime import date, datetime
from decimal import Decimal
from distutils.version import LooseVersion
import pyarrow as pa
super(ArrowTests, cls).setUpClass()
cls.warnings_lock = threading.Lock()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.spark.conf.set("spark.sql.session.timeZone", tz)
cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# Disable fallback by default to easily detect the failures.
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True),
StructField("6_decimal_t", DecimalType(38, 18), True),
StructField("7_date_t", DateType(), True),
StructField("8_timestamp_t", TimestampType(), True)])
cls.data = [(u"a", 1, 10, 0.2, 2.0, Decimal("2.0"),
date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1)),
(u"b", 2, 20, 0.4, 4.0, Decimal("4.0"),
date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2)),
(u"c", 3, 30, 0.8, 6.0, Decimal("6.0"),
date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3))]
# TODO: remove version check once minimum pyarrow version is 0.10.0
if LooseVersion("0.10.0") <= LooseVersion(pa.__version__):
cls.schema.add(StructField("9_binary_t", BinaryType(), True))
cls.data[0] = cls.data[0] + (bytearray(b"a"),)
cls.data[1] = cls.data[1] + (bytearray(b"bb"),)
cls.data[2] = cls.data[2] + (bytearray(b"ccc"),)
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
super(ArrowTests, cls).tearDownClass()
def create_pandas_data_frame(self):
import pandas as pd
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
return pd.DataFrame(data=data_dict)
def test_toPandas_fallback_enabled(self):
import pandas as pd
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([({u'a': 1},)], schema=schema)
with QuietTest(self.sc):
with self.warnings_lock:
with warnings.catch_warnings(record=True) as warns:
# we want the warnings to appear even if this test is run from a subclass
warnings.simplefilter("always")
pdf = df.toPandas()
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertPandasEqual(pdf, pd.DataFrame({u'map': [{u'a': 1}]}))
def test_toPandas_fallback_disabled(self):
from distutils.version import LooseVersion
import pyarrow as pa
schema = StructType([StructField("map", MapType(StringType(), IntegerType()), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.warnings_lock:
with self.assertRaisesRegexp(Exception, 'Unsupported type'):
df.toPandas()
# TODO: remove BinaryType check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
schema = StructType([StructField("binary", BinaryType(), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, 'Unsupported type.*BinaryType'):
df.toPandas()
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def _toPandas_arrow_toggle(self, df):
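        # collect pdf with Arrow disabled (inside the conf block) and pdf_arrow
        # with Arrow re-enabled (outside the block, where the class default is
        # restored), so both conversion paths can be compared against each other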
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
pdf = df.toPandas()
pdf_arrow = df.toPandas()
return pdf, pdf_arrow
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
expected = self.create_pandas_data_frame()
self.assertPandasEqual(expected, pdf)
self.assertPandasEqual(expected, pdf_arrow)
def test_toPandas_respect_session_timezone(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_la, pdf_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf_arrow_ny, pdf_ny)
self.assertFalse(pdf_ny.equals(pdf_la))
from pyspark.sql.types import _check_series_convert_timestamps_local_tz
pdf_la_corrected = pdf_la.copy()
for field in self.schema:
if isinstance(field.dataType, TimestampType):
pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
pdf_la_corrected[field.name], timezone)
self.assertPandasEqual(pdf_ny, pdf_la_corrected)
def test_pandas_round_trip(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
def _createDataFrame_toggle(self, pdf, schema=None):
with self.sql_conf({"spark.sql.execution.arrow.enabled": False}):
df_no_arrow = self.spark.createDataFrame(pdf, schema=schema)
df_arrow = self.spark.createDataFrame(pdf, schema=schema)
return df_no_arrow, df_arrow
def test_createDataFrame_toggle(self):
pdf = self.create_pandas_data_frame()
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
self.assertEquals(df_no_arrow.collect(), df_arrow.collect())
def test_createDataFrame_respect_session_timezone(self):
from datetime import timedelta
pdf = self.create_pandas_data_frame()
timezone = "America/New_York"
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": False,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema)
result_la = df_no_arrow_la.collect()
result_arrow_la = df_arrow_la.collect()
self.assertEqual(result_la, result_arrow_la)
with self.sql_conf({
"spark.sql.execution.pandas.respectSessionTimeZone": True,
"spark.sql.session.timeZone": timezone}):
df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema)
result_ny = df_no_arrow_ny.collect()
result_arrow_ny = df_arrow_ny.collect()
self.assertEqual(result_ny, result_arrow_ny)
self.assertNotEqual(result_ny, result_la)
# Correct result_la by adjusting 3 hours difference between Los Angeles and New York
result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v
for k, v in row.asDict().items()})
for row in result_la]
self.assertEqual(result_ny, result_la_corrected)
def test_createDataFrame_with_schema(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(pdf, schema=self.schema)
self.assertEquals(self.schema, df.schema)
pdf_arrow = df.toPandas()
self.assertPandasEqual(pdf_arrow, pdf)
def test_createDataFrame_with_incorrect_schema(self):
pdf = self.create_pandas_data_frame()
fields = list(self.schema)
fields[0], fields[7] = fields[7], fields[0] # swap str with timestamp
wrong_schema = StructType(fields)
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, ".*No cast.*string.*timestamp.*"):
self.spark.createDataFrame(pdf, schema=wrong_schema)
def test_createDataFrame_with_names(self):
pdf = self.create_pandas_data_frame()
new_names = list(map(str, range(len(self.schema.fieldNames()))))
# Test that schema as a list of column names gets applied
df = self.spark.createDataFrame(pdf, schema=list(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
# Test that schema as tuple of column names gets applied
df = self.spark.createDataFrame(pdf, schema=tuple(new_names))
self.assertEquals(df.schema.fieldNames(), new_names)
def test_createDataFrame_column_name_encoding(self):
import pandas as pd
pdf = pd.DataFrame({u'a': [1]})
columns = self.spark.createDataFrame(pdf).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'a')
columns = self.spark.createDataFrame(pdf, [u'b']).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEquals(columns[0], 'b')
def test_createDataFrame_with_single_data_type(self):
import pandas as pd
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, ".*IntegerType.*not supported.*"):
self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int")
def test_createDataFrame_does_not_modify_input(self):
import pandas as pd
# Some series get converted for Spark to consume, this makes sure input is unchanged
pdf = self.create_pandas_data_frame()
# Use a nanosecond value to make sure it is not truncated
pdf.ix[0, '8_timestamp_t'] = pd.Timestamp(1)
        # Integers with nulls will get NaNs filled with 0 and will be cast
pdf.ix[1, '2_int_t'] = None
pdf_copy = pdf.copy(deep=True)
self.spark.createDataFrame(pdf, schema=self.schema)
self.assertTrue(pdf.equals(pdf_copy))
def test_schema_conversion_roundtrip(self):
from pyspark.sql.types import from_arrow_schema, to_arrow_schema
arrow_schema = to_arrow_schema(self.schema)
schema_rt = from_arrow_schema(arrow_schema)
self.assertEquals(self.schema, schema_rt)
def test_createDataFrame_with_array_type(self):
import pandas as pd
pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]})
df, df_arrow = self._createDataFrame_toggle(pdf)
result = df.collect()
result_arrow = df_arrow.collect()
expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_toPandas_with_array_type(self):
expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])]
array_schema = StructType([StructField("a", ArrayType(IntegerType())),
StructField("b", ArrayType(StringType()))])
df = self.spark.createDataFrame(expected, schema=array_schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_createDataFrame_with_int_col_names(self):
import numpy as np
import pandas as pd
pdf = pd.DataFrame(np.random.rand(4, 2))
df, df_arrow = self._createDataFrame_toggle(pdf)
pdf_col_names = [str(c) for c in pdf.columns]
self.assertEqual(pdf_col_names, df.columns)
self.assertEqual(pdf_col_names, df_arrow.columns)
def test_createDataFrame_fallback_enabled(self):
import pandas as pd
with QuietTest(self.sc):
with self.sql_conf({"spark.sql.execution.arrow.fallback.enabled": True}):
with warnings.catch_warnings(record=True) as warns:
# we want the warnings to appear even if this test is run from a subclass
warnings.simplefilter("always")
df = self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in _exception_message(user_warns[-1]))
self.assertEqual(df.collect(), [Row(a={u'a': 1})])
def test_createDataFrame_fallback_disabled(self):
from distutils.version import LooseVersion
import pandas as pd
import pyarrow as pa
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type'):
self.spark.createDataFrame(
pd.DataFrame([[{u'a': 1}]]), "a: map<string, int>")
# TODO: remove BinaryType check once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
with QuietTest(self.sc):
with self.assertRaisesRegexp(TypeError, 'Unsupported type.*BinaryType'):
self.spark.createDataFrame(
pd.DataFrame([[{'a': b'aaa'}]]), "a: binary")
# Regression test for SPARK-23314
def test_timestamp_dst(self):
import pandas as pd
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
pdf = pd.DataFrame({'time': dt})
df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
df_from_pandas = self.spark.createDataFrame(pdf)
self.assertPandasEqual(pdf, df_from_python.toPandas())
self.assertPandasEqual(pdf, df_from_pandas.toPandas())
def test_toPandas_batch_order(self):
def delay_first_part(partition_index, iterator):
if partition_index == 0:
time.sleep(0.1)
return iterator
# Collects Arrow RecordBatches out of order in driver JVM then re-orders in Python
def run_test(num_records, num_parts, max_records, use_delay=False):
df = self.spark.range(num_records, numPartitions=num_parts).toDF("a")
if use_delay:
df = df.rdd.mapPartitionsWithIndex(delay_first_part).toDF()
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": max_records}):
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
self.assertPandasEqual(pdf, pdf_arrow)
cases = [
(1024, 512, 2), # Use large num partitions for more likely collecting out of order
(64, 8, 2, True), # Use delay in first partition to force collecting out of order
(64, 64, 1), # Test single batch per partition
(64, 1, 64), # Test single partition, single batch
(64, 1, 8), # Test single partition, multiple batches
(30, 7, 2), # Test different sized partitions
]
for case in cases:
run_test(*case)
class EncryptionArrowTests(ArrowTests):
@classmethod
def conf(cls):
return super(EncryptionArrowTests, cls).conf().set("spark.io.encryption.enabled", "true")
if __name__ == "__main__":
from pyspark.sql.tests.test_arrow import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
Caoimhinmg/PmagPy | pmagpy/command_line_extractor.py | 1 | 5617 | #!/usr/bin/env python
from __future__ import print_function
from builtins import object
import pandas as pd
import sys
import pmagpy.pmag as pmag
class command_line_dataframe(object):
"""
creates a dataframe used for validating arguments grabbed from sys.argv.
the dataframe is accessed as self.df.
the dataframe has three columns -- arg_name, reqd, and default -- and an arbitrary number of rows.
    arg_name is the flag that signals the beginning of a value or list of values, e.g. "-f" for infile(s).
reqd is a boolean value for whether that flag is required to run the script.
default is a default value to use if the user doesn't provide that flag.
    to deviate from using the default dataframe, pass in a list of lists with this format: [["f", False, "default.txt"], ...]
this adds or updates values for "f", indicating that it is not required and does have a default value ("default.txt")
"""
def __init__(self, changes=None):
arg_names = ['f', 'F', 'A', 'WD', 'ID', 'Fsa', 'Fsi']
self.default_dict = {'arg_name': arg_names, 'reqd': [True, False, False, False, False, False, False], 'default': ['', '', '', '.', '.', 'er_samples.txt', 'er_sites.txt']}
print(arg_names, len(arg_names))
self.df = pd.DataFrame(self.default_dict, index=arg_names)
arg_names = self.df['arg_name']
if changes:
for change in changes:
#print 'change:', change
if change[0] in arg_names.index:
self.df.loc[change[0], 'reqd'] = change[1]
self.df.loc[change[0], 'default'] = change[2]
else:
#print 'putting in:', change
d = pd.DataFrame({'arg_name': [change[0]], 'reqd': [change[1]], 'default': [change[2]]}, index=[change[0]])
self.df = pd.concat([self.df, d])
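## illustrative sketch (not part of the original module): overriding the default
## dataframe. the 'fmt' flag and the default values below are hypothetical.
#cld = command_line_dataframe([['f', False, 'measurements.txt'], ['fmt', False, 'svg']])
#print(cld.df.loc['f', 'reqd']) # -> False (overridden from the default True)
#print(cld.df.loc['fmt', 'default']) # -> 'svg' (newly added row)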
def extract_args(argv):
"""
take sys.argv that is used to call a command-line script and return a correctly split list of arguments
for example, this input: ["eqarea.py", "-f", "infile", "-F", "outfile", "-A"]
will return this output: [['f', 'infile'], ['F', 'outfile'], ['A']]
"""
string = " ".join(argv)
string = string.split(' -')
program = string[0]
arguments = [s.split() for s in string[1:]]
return arguments
def check_args(arguments, data_frame):
"""
check arguments against a command_line_dataframe.
checks that:
all arguments are valid
all required arguments are present
default values are used where needed
"""
stripped_args = [a[0] for a in arguments]
df = data_frame.df
# first make sure all args are valid
for a in arguments:
if a[0] not in df.index:
print("-I- ignoring invalid argument: {}".format(a[0]))
print("-")
# next make sure required arguments are present
condition = df['reqd']
reqd_args = df[condition]
for arg in reqd_args['arg_name']:
if arg not in stripped_args:
raise pmag.MissingCommandLineArgException("-"+arg)
#next, assign any default values as needed
#condition = df['default'] != '' # don't need this, and sometimes the correct default argument IS ''
default_args = df #[condition]
for value in default_args.values:
arg_name, default = value[0], value[1]
if arg_name not in stripped_args:
print("-I- using default for arg:", arg_name)
print("-")
arguments.append([arg_name, default])
return arguments
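## illustrative sketch (not part of the original module): feeding extract_args
## output into check_args. the argv below is hypothetical.
#argv = ['eqarea.py', '-f', 'infile.txt', '-A']
#checked = check_args(extract_args(argv), command_line_dataframe())
## 'f' and 'A' come from argv; the other known flags are appended with their defaults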
def extract_and_check_args(args_list, dataframe):
arguments = extract_args(args_list)
checked_args = check_args(arguments, dataframe)
return checked_args
def get_vars(arg_names, args_list):
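    """
    given a list of flag names and a checked argument list (the output of
    extract_and_check_args), return one value per flag: multi-word values are
    joined into a single space-separated string, a lone numeric value is
    returned as a number, and flags given without a value become booleans
    (True for lower-case flags, False otherwise).
    """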
stripped_args = [arg[0] for arg in args_list]
vals = []
for arg in arg_names:
ind = stripped_args.index(arg)
values = args_list[ind][1:]
islower = arg.islower()
vals.append(values or islower)
clean_vals = []
for val in vals: # transform vals into a list of strings, int/floats, and booleans (instead of lists and booleans)
# deal with booleans
if isinstance(val, bool) or isinstance(val, int) or isinstance(val, float):
clean_vals.append(val)
else:
# deal with numbers
if len(val) == 1 and (isinstance(val[0], int) or isinstance(val[0], float)):
clean_vals.append(val[0])
# deal with lists
elif not isinstance(val, bool):
try:
clean_vals.append(' '.join(val))
except TypeError:
clean_vals.append([])
# deal with strings
else:
clean_vals.append(val)
return clean_vals
##example use:
##make a pandas dataframe with three columns:
## col 1 is the command-line flag (minus the '-'), common ones include f, F, fsa, Fsa, etc.
## col 2 is a boolean for if the flag is required or not
## col 3 is a default value to use if the flag is not provided
#dataframe = command_line_dataframe([['sav', False, 0], ['fmt', False, 'svg'], ['s', False, 20]])
## get the args from the command line:
#args = sys.argv
## check through the args to make sure that reqd args are present, defaults are used as needed, and invalid args are ignored
#checked_args = extract_and_check_args(args, dataframe)
## assign values to variables based on their associated command-line flag
#fmt, size, plot = get_vars(['fmt', 's', 'sav'], checked_args)
#print "fmt:", fmt, "size:", size, "plot:", plot
| bsd-3-clause |
fmacias64/Dato-Core | src/unity/python/graphlab/data_structures/sarray.py | 13 | 91593 | """
This module defines the SArray class which provides the
ability to create, access and manipulate a remote scalable array object.
SArray acts similarly to pandas.Series but without indexing.
The data is immutable, homogeneous, and is stored on the GraphLab Server side.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import graphlab.connect as _mt
import graphlab.connect.main as glconnect
from graphlab.cython.cy_type_utils import pytype_from_dtype, infer_type_of_list, is_numeric_type
from graphlab.cython.cy_sarray import UnitySArrayProxy
from graphlab.cython.context import debug_trace as cython_context
from graphlab.util import _make_internal_url, _is_callable
import graphlab as gl
import inspect
import math
from graphlab.deps import numpy, HAS_NUMPY
from graphlab.deps import pandas, HAS_PANDAS
import time
import array
import datetime
import graphlab.meta as meta
import itertools
import warnings
__all__ = ['SArray']
def _create_sequential_sarray(size, start=0, reverse=False):
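    # internal helper: asks the unity backend for an SArray of `size` sequential
    # integers starting at `start`; the `reverse` flag is forwarded to the backend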
if type(size) is not int:
raise TypeError("size must be int")
    if type(start) is not int:
        raise TypeError("start must be int")
    if type(reverse) is not bool:
        raise TypeError("reverse must be bool")
with cython_context():
return SArray(_proxy=glconnect.get_unity().create_sequential_sarray(size, start, reverse))
class SArray(object):
"""
An immutable, homogeneously typed array object backed by persistent storage.
SArray is scaled to hold data that are much larger than the machine's main
memory. It fully supports missing values and random access. The
data backing an SArray is located on the same machine as the GraphLab
Server process. Each column in an :py:class:`~graphlab.SFrame` is an
SArray.
Parameters
----------
data : list | numpy.ndarray | pandas.Series | string
The input data. If this is a list, numpy.ndarray, or pandas.Series,
the data in the list is converted and stored in an SArray.
Alternatively if this is a string, it is interpreted as a path (or
url) to a text file. Each line of the text file is loaded as a
separate row. If ``data`` is a directory where an SArray was previously
saved, this is loaded as an SArray read directly out of that
directory.
dtype : {None, int, float, str, list, array.array, dict, datetime.datetime, graphlab.Image}, optional
The data type of the SArray. If not specified (None), we attempt to
infer it from the input. If it is a numpy array or a Pandas series, the
dtype of the array/series is used. If it is a list, the dtype is
inferred from the inner list. If it is a URL or path to a text file, we
default the dtype to str.
ignore_cast_failure : bool, optional
If True, ignores casting failures but warns when elements cannot be
casted into the specified dtype.
Notes
-----
- If ``data`` is pandas.Series, the index will be ignored.
- The datetime is based on the Boost datetime format (see http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html
for details)
- When working with the GraphLab EC2 instance (see
:py:func:`graphlab.aws.launch_EC2()`), an SArray cannot be constructed
using local file path, because it involves a potentially large amount of
data transfer from client to server. However, it is still okay to use a
remote file path. See the examples below. The same restriction applies to
:py:class:`~graphlab.SGraph` and :py:class:`~graphlab.SFrame`.
Examples
--------
SArray can be constructed in various ways:
Construct an SArray from list.
>>> from graphlab import SArray
>>> sa = SArray(data=[1,2,3,4,5], dtype=int)
Construct an SArray from numpy.ndarray.
>>> sa = SArray(data=numpy.asarray([1,2,3,4,5]), dtype=int)
or:
>>> sa = SArray(numpy.asarray([1,2,3,4,5]), int)
Construct an SArray from pandas.Series.
>>> sa = SArray(data=pd.Series([1,2,3,4,5]), dtype=int)
or:
>>> sa = SArray(pd.Series([1,2,3,4,5]), int)
If the type is not specified, automatic inference is attempted:
>>> SArray(data=[1,2,3,4,5]).dtype()
int
>>> SArray(data=[1,2,3,4,5.0]).dtype()
float
The SArray supports standard datatypes such as: integer, float and string.
It also supports three higher level datatypes: float arrays, dict
and list (array of arbitrary types).
Create an SArray from a list of strings:
>>> sa = SArray(data=['a','b'])
Create an SArray from a list of float arrays;
>>> sa = SArray([[1,2,3], [3,4,5]])
Create an SArray from a list of lists:
>>> sa = SArray(data=[['a', 1, {'work': 3}], [2, 2.0]])
Create an SArray from a list of dictionaries:
>>> sa = SArray(data=[{'a':1, 'b': 2}, {'b':2, 'c': 1}])
Create an SArray from a list of datetime objects:
>>> sa = SArray(data=[datetime.datetime(2011, 10, 20, 9, 30, 10)])
Construct an SArray from local text file. (Only works for local server).
>>> sa = SArray('/tmp/a_to_z.txt.gz')
Construct an SArray from a text file downloaded from a URL.
>>> sa = SArray('http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz')
**Numeric Operators**
SArrays support a large number of vectorized operations on numeric types.
For instance:
>>> sa = SArray([1,1,1,1,1])
>>> sb = SArray([2,2,2,2,2])
>>> sc = sa + sb
>>> sc
dtype: int
Rows: 5
[3, 3, 3, 3, 3]
>>> sc + 2
dtype: int
Rows: 5
[5, 5, 5, 5, 5]
Operators which are supported include all numeric operators (+,-,*,/), as
well as comparison operators (>, >=, <, <=), and logical operators (&, |).
For instance:
>>> sa = SArray([1,2,3,4,5])
>>> (sa >= 2) & (sa <= 4)
dtype: int
Rows: 5
[0, 1, 1, 1, 0]
The numeric operators (+,-,*,/) also work on array types:
>>> sa = SArray(data=[[1.0,1.0], [2.0,2.0]])
>>> sa + 1
dtype: list
Rows: 2
[array('f', [2.0, 2.0]), array('f', [3.0, 3.0])]
>>> sa + sa
dtype: list
Rows: 2
[array('f', [2.0, 2.0]), array('f', [4.0, 4.0])]
The addition operator (+) can also be used for string concatenation:
>>> sa = SArray(data=['a','b'])
>>> sa + "x"
dtype: str
Rows: 2
['ax', 'bx']
This can be useful for performing type interpretation of lists or
dictionaries stored as strings:
>>> sa = SArray(data=['a,b','c,d'])
>>> ("[" + sa + "]").astype(list) # adding brackets make it look like a list
dtype: list
Rows: 2
[['a', 'b'], ['c', 'd']]
All comparison operations and boolean operators are supported and emit
binary SArrays.
>>> sa = SArray([1,2,3,4,5])
>>> sa >= 2
dtype: int
    Rows: 5
[0, 1, 1, 1, 1]
>>> (sa >= 2) & (sa <= 4)
dtype: int
    Rows: 5
[0, 1, 1, 1, 0]
**Element Access and Slicing**
SArrays can be accessed by integer keys just like a regular python list.
Such operations may not be fast on large datasets so looping over an SArray
should be avoided.
>>> sa = SArray([1,2,3,4,5])
>>> sa[0]
1
>>> sa[2]
3
>>> sa[5]
IndexError: SFrame index out of range
Negative indices can be used to access elements from the tail of the array
>>> sa[-1] # returns the last element
5
>>> sa[-2] # returns the second to last element
4
The SArray also supports the full range of python slicing operators:
>>> sa[1000:] # Returns an SArray containing rows 1000 to the end
>>> sa[:1000] # Returns an SArray containing rows 0 to row 999 inclusive
>>> sa[0:1000:2] # Returns an SArray containing rows 0 to row 1000 in steps of 2
>>> sa[-100:] # Returns an SArray containing last 100 rows
>>> sa[-100:len(sa):2] # Returns an SArray containing last 100 rows in steps of 2
**Logical Filter**
An SArray can be filtered using
>>> array[binary_filter]
where array and binary_filter are SArrays of the same length. The result is
a new SArray which contains only elements of 'array' where its matching row
in the binary_filter is non zero.
This permits the use of boolean operators that can be used to perform
logical filtering operations. For instance:
>>> sa = SArray([1,2,3,4,5])
>>> sa[(sa >= 2) & (sa <= 4)]
dtype: int
Rows: 3
[2, 3, 4]
This can also be used more generally to provide filtering capability which
is otherwise not expressible with simple boolean functions. For instance:
>>> sa = SArray([1,2,3,4,5])
>>> sa[sa.apply(lambda x: math.log(x) <= 1)]
dtype: int
    Rows: 2
[1, 2]
This is equivalent to
>>> sa.filter(lambda x: math.log(x) <= 1)
dtype: int
    Rows: 2
[1, 2]
**Iteration**
The SArray is also iterable, but not efficiently since this involves a
streaming transmission of data from the server to the client. This should
not be used for large data.
>>> sa = SArray([1,2,3,4,5])
>>> [i + 1 for i in sa]
[2, 3, 4, 5, 6]
This can be used to convert an SArray to a list:
>>> sa = SArray([1,2,3,4,5])
>>> l = list(sa)
>>> l
[1, 2, 3, 4, 5]
"""
def __init__(self, data=[], dtype=None, ignore_cast_failure=False, _proxy=None):
"""
__init__(data=list(), dtype=None, ignore_cast_failure=False)
Construct a new SArray. The source of data includes: list,
numpy.ndarray, pandas.Series, and urls.
"""
_mt._get_metric_tracker().track('sarray.init')
if dtype is not None and type(dtype) != type:
raise TypeError('dtype must be a type, e.g. use int rather than \'int\'')
if (_proxy):
self.__proxy__ = _proxy
elif type(data) == SArray:
self.__proxy__ = data.__proxy__
else:
self.__proxy__ = UnitySArrayProxy(glconnect.get_client())
# we need to perform type inference
if dtype is None:
if (isinstance(data, list)):
# if it is a list, Get the first type and make sure
# the remaining items are all of the same type
dtype = infer_type_of_list(data)
elif isinstance(data, array.array):
dtype = infer_type_of_list(data)
elif HAS_PANDAS and isinstance(data, pandas.Series):
# if it is a pandas series get the dtype of the series
dtype = pytype_from_dtype(data.dtype)
if dtype == object:
# we need to get a bit more fine grained than that
dtype = infer_type_of_list(data)
elif HAS_NUMPY and isinstance(data, numpy.ndarray):
# if it is a numpy array, get the dtype of the array
dtype = pytype_from_dtype(data.dtype)
if dtype == object:
# we need to get a bit more fine grained than that
dtype = infer_type_of_list(data)
if len(data.shape) == 2:
# we need to make it an array or a list
if dtype == float or dtype == int:
dtype = array.array
else:
dtype = list
elif len(data.shape) > 2:
raise TypeError("Cannot convert Numpy arrays of greater than 2 dimensions")
elif (isinstance(data, str) or isinstance(data, unicode)):
# if it is a file, we default to string
dtype = str
if HAS_PANDAS and isinstance(data, pandas.Series):
with cython_context():
self.__proxy__.load_from_iterable(data.values, dtype, ignore_cast_failure)
elif (HAS_NUMPY and isinstance(data, numpy.ndarray)) or isinstance(data, list) or isinstance(data, array.array):
with cython_context():
self.__proxy__.load_from_iterable(data, dtype, ignore_cast_failure)
elif (isinstance(data, str) or isinstance(data, unicode)):
internal_url = _make_internal_url(data)
with cython_context():
self.__proxy__.load_autodetect(internal_url, dtype)
else:
raise TypeError("Unexpected data source. " \
"Possible data source types are: list, " \
"numpy.ndarray, pandas.Series, and string(url)")
@classmethod
def from_const(cls, value, size):
"""
Constructs an SArray of size with a const value.
Parameters
----------
value : [int | float | str | array.array | list | dict | datetime]
The value to fill the SArray
size : int
The size of the SArray
Examples
--------
Construct an SArray consisting of 10 zeroes:
>>> graphlab.SArray.from_const(0, 10)
"""
        assert type(size) is int and size >= 0, "size must be a non-negative int"
if (type(value) not in [type(None), int, float, str, array.array, list, dict, datetime.datetime]):
raise TypeError('Cannot create sarray of value type %s' % str(type(value)))
proxy = UnitySArrayProxy(glconnect.get_client())
proxy.load_from_const(value, size)
return cls(_proxy=proxy)
@classmethod
def from_sequence(cls, *args):
"""
from_sequence(start=0, stop)
Create an SArray from sequence
.. sourcecode:: python
Construct an SArray of integer values from 0 to 999
>>> gl.SArray.from_sequence(1000)
This is equivalent, but more efficient than:
>>> gl.SArray(range(1000))
Construct an SArray of integer values from 10 to 999
>>> gl.SArray.from_sequence(10, 1000)
This is equivalent, but more efficient than:
>>> gl.SArray(range(10, 1000))
Parameters
----------
start : int, optional
The start of the sequence. The sequence will contain this value.
stop : int
The end of the sequence. The sequence will not contain this value.
"""
start = None
stop = None
# fill with args. This checks for from_sequence(100), from_sequence(10,100)
if len(args) == 1:
stop = args[0]
elif len(args) == 2:
start = args[0]
stop = args[1]
if stop is None and start is None:
raise TypeError("from_sequence expects at least 1 argument. got 0")
elif start is None:
return _create_sequential_sarray(stop)
else:
size = stop - start
# this matches the behavior of range
# i.e. range(100,10) just returns an empty array
if (size < 0):
size = 0
return _create_sequential_sarray(size, start)
@classmethod
def from_avro(cls, filename):
"""
Construct an SArray from an Avro file. The SArray type is determined by
the schema of the Avro file.
Parameters
----------
filename : str
The Avro file to load into an SArray.
Examples
--------
Construct an SArray from a local Avro file named 'data.avro':
>>> graphlab.SArray.from_avro('/data/data.avro')
Notes
-----
Currently only supports direct loading of files on the local filesystem.
References
----------
- `Avro Specification <http://avro.apache.org/docs/1.7.7/spec.html>`_
"""
_mt._get_metric_tracker().track('sarray.from_avro')
proxy = UnitySArrayProxy(glconnect.get_client())
proxy.load_from_avro(filename)
return cls(_proxy = proxy)
def __get_content_identifier__(self):
"""
Returns the unique identifier of the content that backs the SArray
Notes
-----
Meant for internal use only.
"""
with cython_context():
return self.__proxy__.get_content_identifier()
def save(self, filename, format=None):
"""
Saves the SArray to file.
The saved SArray will be in a directory named with the `targetfile`
parameter.
Parameters
----------
filename : string
A local path or a remote URL. If format is 'text', it will be
saved as a text file. If format is 'binary', a directory will be
created at the location which will contain the SArray.
format : {'binary', 'text', 'csv'}, optional
Format in which to save the SFrame. Binary saved SArrays can be
loaded much faster and without any format conversion losses.
'text' and 'csv' are synonymous: Each SArray row will be written
as a single line in an output text file. If not
given, will try to infer the format from filename given. If file
name ends with 'csv', 'txt' or '.csv.gz', then save as 'csv' format,
otherwise save as 'binary' format.
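        Examples
        --------
        Illustrative only; the file paths below are placeholders.
        >>> sa = graphlab.SArray(range(5))
        >>> sa.save('./my_sarray') # saved in 'binary' format
        >>> sa.save('./my_sarray.csv') # extension triggers 'text' format
        >>> sa.save('./my_sarray_text', format='text') # force text output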
"""
if format == None:
if filename.endswith(('.csv', '.csv.gz', 'txt')):
format = 'text'
else:
format = 'binary'
if format == 'binary':
with cython_context():
self.__proxy__.save(_make_internal_url(filename))
elif format == 'text':
sf = gl.SFrame({'X1':self})
with cython_context():
sf.__proxy__.save_as_csv(_make_internal_url(filename), {'header':False})
def _escape_space(self,s):
return "".join([ch.encode('string_escape') if ch.isspace() else ch for ch in s])
def __repr__(self):
"""
Returns a string description of the SArray.
"""
ret = "dtype: " + str(self.dtype().__name__) + "\n"
ret = ret + "Rows: " + str(self.size()) + "\n"
ret = ret + self.__str__()
return ret
def __str__(self):
"""
Returns a string containing the first 100 elements of the array.
"""
# If sarray is image, take head of elements casted to string.
if self.dtype() == gl.data_structures.image.Image:
headln = str(list(self._head_str(100)))
else:
headln = self._escape_space(str(list(self.head(100))))
headln = unicode(headln.decode('string_escape'),'utf-8',errors='replace').encode('utf-8')
if (self.size() > 100):
# cut the last close bracket
# and replace it with ...
headln = headln[0:-1] + ", ... ]"
return headln
def __nonzero__(self):
"""
Returns true if the array is not empty.
"""
return self.size() != 0
def __len__(self):
"""
Returns the length of the array
"""
return self.size()
def __iter__(self):
"""
Provides an iterator to the contents of the array.
"""
def generator():
elems_at_a_time = 262144
self.__proxy__.begin_iterator()
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
while(True):
for j in ret:
yield j
if len(ret) == elems_at_a_time:
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
else:
break
return generator()
def __add__(self, other):
"""
If other is a scalar value, adds it to the current array, returning
the new result. If other is an SArray, performs an element-wise
addition of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '+'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '+'))
def __sub__(self, other):
"""
If other is a scalar value, subtracts it from the current array, returning
the new result. If other is an SArray, performs an element-wise
subtraction of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '-'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '-'))
def __mul__(self, other):
"""
If other is a scalar value, multiplies it to the current array, returning
the new result. If other is an SArray, performs an element-wise
multiplication of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '*'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '*'))
def __div__(self, other):
"""
If other is a scalar value, divides each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise division of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '/'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '/'))
def __lt__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '<'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '<'))
def __gt__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '>'))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '>'))
def __le__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '<='))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '<='))
def __ge__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '>='))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '>='))
def __radd__(self, other):
"""
Adds a scalar value to the current array.
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '+'))
def __rsub__(self, other):
"""
Subtracts a scalar value from the current array.
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '-'))
def __rmul__(self, other):
"""
Multiplies a scalar value to the current array.
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '*'))
def __rdiv__(self, other):
"""
Divides a scalar value by each element in the array
Returned array has the same type as the array on the right hand side
"""
with cython_context():
return SArray(_proxy = self.__proxy__.right_scalar_operator(other, '/'))
def __eq__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the new result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '=='))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '=='))
def __ne__(self, other):
"""
If other is a scalar value, compares each element of the current array
by the value, returning the new result. If other is an SArray, performs
an element-wise comparison of the two arrays.
"""
with cython_context():
if type(other) is SArray:
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '!='))
else:
return SArray(_proxy = self.__proxy__.left_scalar_operator(other, '!='))
def __and__(self, other):
"""
Perform a logical element-wise 'and' against another SArray.
"""
if type(other) is SArray:
with cython_context():
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '&'))
else:
raise TypeError("SArray can only perform logical and against another SArray")
def __or__(self, other):
"""
Perform a logical element-wise 'or' against another SArray.
"""
if type(other) is SArray:
with cython_context():
return SArray(_proxy = self.__proxy__.vector_operator(other.__proxy__, '|'))
else:
raise TypeError("SArray can only perform logical or against another SArray")
def __getitem__(self, other):
"""
If the key is an SArray of identical length, this function performs a
logical filter: i.e. it subselects all the elements in this array
where the corresponding value in the other array evaluates to true.
If the key is an integer this returns a single row of
the SArray. If the key is a slice, this returns an SArray with the
sliced rows. See the GraphLab Create User Guide for usage examples.
"""
sa_len = len(self)
if type(other) is int:
if other < 0:
other += sa_len
if other >= sa_len:
raise IndexError("SFrame index out of range")
try:
lb, ub, value_list = self._getitem_cache
if lb <= other < ub:
return value_list[other - lb]
except AttributeError:
pass
# Not in cache, need to grab it
block_size = 1024 * (32 if self.dtype() in [int, long, float] else 4)
block_num = int(other // block_size)
lb = block_num * block_size
ub = min(sa_len, lb + block_size)
val_list = list(SArray(_proxy = self.__proxy__.copy_range(lb, 1, ub)))
self._getitem_cache = (lb, ub, val_list)
return val_list[other - lb]
elif type(other) is SArray:
if len(other) != sa_len:
raise IndexError("Cannot perform logical indexing on arrays of different length.")
with cython_context():
return SArray(_proxy = self.__proxy__.logical_filter(other.__proxy__))
elif type(other) is slice:
start = other.start
stop = other.stop
step = other.step
if start is None:
start = 0
if stop is None:
stop = sa_len
if step is None:
step = 1
# handle negative indices
if start < 0:
start = sa_len + start
if stop < 0:
stop = sa_len + stop
return SArray(_proxy = self.__proxy__.copy_range(start, step, stop))
else:
raise IndexError("Invalid type to use for indexing")
def __materialize__(self):
"""
For a SArray that is lazily evaluated, force persist this sarray
to disk, committing all lazy evaluated operations.
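        Example (illustrative; assumes the apply result below is lazily evaluated):
        >>> sa = graphlab.SArray(range(1000)).apply(lambda x: x + 1)
        >>> sa.__materialize__()
        >>> sa.__is_materialized__()
        True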
"""
with cython_context():
self.__proxy__.materialize()
def __is_materialized__(self):
"""
Returns whether or not the sarray has been materialized.
"""
return self.__proxy__.is_materialized()
def size(self):
"""
The size of the SArray.
"""
return self.__proxy__.size()
def dtype(self):
"""
The data type of the SArray.
Returns
-------
out : type
The type of the SArray.
Examples
--------
>>> sa = gl.SArray(["The quick brown fox jumps over the lazy dog."])
>>> sa.dtype()
str
>>> sa = gl.SArray(range(10))
>>> sa.dtype()
int
"""
return self.__proxy__.dtype()
def head(self, n=10):
"""
Returns an SArray which contains the first n rows of this SArray.
Parameters
----------
n : int
The number of rows to fetch.
Returns
-------
out : SArray
A new SArray which contains the first n rows of the current SArray.
Examples
--------
>>> gl.SArray(range(10)).head(5)
dtype: int
Rows: 5
[0, 1, 2, 3, 4]
"""
return SArray(_proxy=self.__proxy__.head(n))
def vector_slice(self, start, end=None):
"""
If this SArray contains vectors or recursive types, this returns a new SArray
containing each individual vector sliced, between start and end, exclusive.
Parameters
----------
start : int
The start position of the slice.
end : int, optional.
The end position of the slice. Note that the end position
is NOT included in the slice. Thus a g.vector_slice(1,3) will extract
entries in position 1 and 2.
Returns
-------
out : SArray
Each individual vector sliced according to the arguments.
Examples
--------
If g is a vector of floats:
>>> g = SArray([[1,2,3],[2,3,4]])
>>> g
dtype: array
Rows: 2
[array('d', [1.0, 2.0, 3.0]), array('d', [2.0, 3.0, 4.0])]
>>> g.vector_slice(0) # extracts the first element of each vector
dtype: float
Rows: 2
[1.0, 2.0]
>>> g.vector_slice(0, 2) # extracts the first two elements of each vector
dtype: array.array
Rows: 2
[array('d', [1.0, 2.0]), array('d', [2.0, 3.0])]
If a vector cannot be sliced, the result will be None:
>>> g = SArray([[1],[1,2],[1,2,3]])
>>> g
dtype: array.array
Rows: 3
[array('d', [1.0]), array('d', [1.0, 2.0]), array('d', [1.0, 2.0, 3.0])]
>>> g.vector_slice(2)
dtype: float
Rows: 3
[None, None, 3.0]
>>> g.vector_slice(0,2)
dtype: list
Rows: 3
[None, array('d', [1.0, 2.0]), array('d', [1.0, 2.0])]
If g is a vector of mixed types (float, int, str, array, list, etc.):
>>> g = SArray([['a',1,1.0],['b',2,2.0]])
>>> g
dtype: list
Rows: 2
[['a', 1, 1.0], ['b', 2, 2.0]]
>>> g.vector_slice(0) # extracts the first element of each vector
dtype: list
Rows: 2
[['a'], ['b']]
"""
if (self.dtype() != array.array) and (self.dtype() != list):
raise RuntimeError("Only Vector type can be sliced")
if end == None:
end = start + 1
with cython_context():
return SArray(_proxy=self.__proxy__.vector_slice(start, end))
def _count_words(self, to_lower=True):
"""
For documentation, see graphlab.text_analytics.count_ngrams().
"""
if (self.dtype() != str):
raise TypeError("Only SArray of string type is supported for counting bag of words")
_mt._get_metric_tracker().track('sarray.count_words')
# construct options, will extend over time
options = dict()
options["to_lower"] = to_lower == True
with cython_context():
return SArray(_proxy=self.__proxy__.count_bag_of_words(options))
def _count_ngrams(self, n=2, method="word", to_lower=True, ignore_space=True):
"""
For documentation, see graphlab.text_analytics.count_ngrams().
"""
if (self.dtype() != str):
raise TypeError("Only SArray of string type is supported for counting n-grams")
if (type(n) != int):
raise TypeError("Input 'n' must be of type int")
if (n < 1):
raise ValueError("Input 'n' must be greater than 0")
if (n > 5):
warnings.warn("It is unusual for n-grams to be of size larger than 5.")
_mt._get_metric_tracker().track('sarray.count_ngrams', properties={'n':n, 'method':method})
# construct options, will extend over time
options = dict()
options["to_lower"] = to_lower == True
options["ignore_space"] = ignore_space == True
if method == "word":
with cython_context():
return SArray(_proxy=self.__proxy__.count_ngrams(n, options ))
elif method == "character" :
with cython_context():
return SArray(_proxy=self.__proxy__.count_character_ngrams(n, options ))
else:
raise ValueError("Invalid 'method' input value. Please input either 'word' or 'character' ")
def dict_trim_by_keys(self, keys, exclude=True):
"""
Filter an SArray of dictionary type by the given keys. By default, all
keys that are in the provided list in ``keys`` are *excluded* from the
returned SArray.
Parameters
----------
keys : list
A collection of keys to trim down the elements in the SArray.
exclude : bool, optional
If True, all keys that are in the input key list are removed. If
False, only keys that are in the input key list are retained.
Returns
-------
out : SArray
A SArray of dictionary type, with each dictionary element trimmed
according to the input criteria.
See Also
--------
dict_trim_by_values
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":1, "dog":2},
{"this": 2, "are": 2, "cat": 1}])
>>> sa.dict_trim_by_keys(["this", "is", "and", "are"], exclude=True)
dtype: dict
Rows: 2
[{'dog': 2}, {'cat': 1}]
"""
if isinstance(keys, str) or (not hasattr(keys, "__iter__")):
keys = [keys]
_mt._get_metric_tracker().track('sarray.dict_trim_by_keys')
with cython_context():
return SArray(_proxy=self.__proxy__.dict_trim_by_keys(keys, exclude))
def dict_trim_by_values(self, lower=None, upper=None):
"""
Filter dictionary values to a given range (inclusive). Trimming is only
performed on values which can be compared to the bound values. Fails on
SArrays whose data type is not ``dict``.
Parameters
----------
lower : int or long or float, optional
The lowest dictionary value that would be retained in the result. If
not given, lower bound is not applied.
upper : int or long or float, optional
The highest dictionary value that would be retained in the result.
If not given, upper bound is not applied.
Returns
-------
out : SArray
An SArray of dictionary type, with each dict element trimmed
according to the input criteria.
See Also
--------
dict_trim_by_keys
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_trim_by_values(2,5)
dtype: dict
Rows: 2
[{'is': 5}, {'this': 2, 'cat': 5}]
>>> sa.dict_trim_by_values(upper=5)
dtype: dict
Rows: 2
[{'this': 1, 'is': 5}, {'this': 2, 'are': 1, 'cat': 5}]
"""
if None != lower and (not is_numeric_type(type(lower))):
raise TypeError("lower bound has to be a numeric value")
if None != upper and (not is_numeric_type(type(upper))):
raise TypeError("upper bound has to be a numeric value")
_mt._get_metric_tracker().track('sarray.dict_trim_by_values')
with cython_context():
return SArray(_proxy=self.__proxy__.dict_trim_by_values(lower, upper))
def dict_keys(self):
"""
Create an SArray that contains all the keys from each dictionary
element as a list. Fails on SArrays whose data type is not ``dict``.
Returns
-------
out : SArray
A SArray of list type, where each element is a list of keys
from the input SArray element.
See Also
--------
dict_values
Examples
---------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_keys()
dtype: list
Rows: 2
[['this', 'is', 'dog'], ['this', 'are', 'cat']]
"""
_mt._get_metric_tracker().track('sarray.dict_keys')
with cython_context():
return SArray(_proxy=self.__proxy__.dict_keys())
def dict_values(self):
"""
Create an SArray that contains all the values from each dictionary
element as a list. Fails on SArrays whose data type is not ``dict``.
Returns
-------
out : SArray
A SArray of list type, where each element is a list of values
from the input SArray element.
See Also
--------
dict_keys
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_values()
dtype: list
Rows: 2
[[1, 5, 7], [2, 1, 5]]
"""
_mt._get_metric_tracker().track('sarray.dict_values')
with cython_context():
return SArray(_proxy=self.__proxy__.dict_values())
def dict_has_any_keys(self, keys):
"""
Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has any of the given keys.
Fails on SArrays whose data type is not ``dict``.
Parameters
----------
keys : list
A list of key values to check each dictionary against.
Returns
-------
out : SArray
A SArray of int type, where each element indicates whether the
input SArray element contains any key in the input list.
See Also
--------
dict_has_all_keys
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7}, {"animal":1},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_any_keys(["is", "this", "are"])
dtype: int
Rows: 3
[1, 1, 0]
"""
if isinstance(keys, str) or (not hasattr(keys, "__iter__")):
keys = [keys]
_mt._get_metric_tracker().track('sarray.dict_has_any_keys')
with cython_context():
return SArray(_proxy=self.__proxy__.dict_has_any_keys(keys))
def dict_has_all_keys(self, keys):
"""
Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has all of the given keys.
Fails on SArrays whose data type is not ``dict``.
Parameters
----------
keys : list
A list of key values to check each dictionary against.
Returns
-------
out : SArray
A SArray of int type, where each element indicates whether the
input SArray element contains all keys in the input list.
See Also
--------
dict_has_any_keys
Examples
--------
>>> sa = graphlab.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_all_keys(["is", "this"])
dtype: int
Rows: 2
[1, 0]
"""
if isinstance(keys, str) or (not hasattr(keys, "__iter__")):
keys = [keys]
_mt._get_metric_tracker().track('sarray.dict_has_all_keys')
with cython_context():
return SArray(_proxy=self.__proxy__.dict_has_all_keys(keys))
def apply(self, fn, dtype=None, skip_undefined=True, seed=None,
_lua_translate=False):
"""
apply(fn, dtype=None, skip_undefined=True, seed=None)
Transform each element of the SArray by a given function. The result
SArray is of type ``dtype``. ``fn`` should be a function that returns
exactly one value which can be cast into the type specified by
``dtype``. If ``dtype`` is not specified, the first 100 elements of the
SArray are used to make a guess about the data type.
Parameters
----------
fn : function
The function to transform each element. Must return exactly one
value which can be cast into the type specified by ``dtype``.
This can also be a toolkit extension function which is compiled
as a native shared library using SDK.
dtype : {None, int, float, str, list, array.array, dict, graphlab.Image}, optional
The data type of the new SArray. If ``None``, the first 100 elements
of the array are used to guess the target data type.
skip_undefined : bool, optional
If True, will not apply ``fn`` to any undefined values.
seed : int, optional
Used as the seed if a random number generator is included in ``fn``.
Returns
-------
out : SArray
The SArray transformed by ``fn``. Each element of the SArray is of
type ``dtype``.
See Also
--------
SFrame.apply
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.apply(lambda x: x*2)
dtype: int
Rows: 3
[2, 4, 6]
Using native toolkit extension function:
.. code-block:: c++
#include <graphlab/sdk/toolkit_function_macros.hpp>
#include <cmath>
using namespace graphlab;
double logx(const flexible_type& x, double base) {
return log((double)(x)) / log(base);
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(logx, "x", "base");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> import example
>>> sa = graphlab.SArray([1,2,4])
>>> sa.apply(lambda x: example.logx(x, 2))
dtype: float
Rows: 3
[0.0, 1.0, 2.0]
"""
if (type(fn) == str):
fn = "LUA" + fn
if dtype == None:
raise TypeError("dtype must be specified for a lua function")
else:
assert _is_callable(fn), "Input must be a function"
dryrun = [fn(i) for i in self.head(100) if i is not None]
import traceback
if dtype == None:
dtype = infer_type_of_list(dryrun)
if not seed:
seed = time.time()
# log metric
_mt._get_metric_tracker().track('sarray.apply')
# First phase test if it is a toolkit function
nativefn = None
try:
import graphlab.extensions as extensions
nativefn = extensions._build_native_function_call(fn)
except:
# failure are fine. we just fall out into the next few phases
pass
if nativefn is not None:
# this is a toolkit lambda. We can do something about it
with cython_context():
return SArray(_proxy=self.__proxy__.transform_native(nativefn, dtype, skip_undefined, seed))
# Second phase. Try lua compilation if possible
try:
# try compilation
if _lua_translate:
# its a function
print "Attempting Lua Translation"
import graphlab.Lua_Translator
import ast
import StringIO
def isalambda(v):
return isinstance(v, type(lambda: None)) and v.__name__ == '<lambda>'
output = StringIO.StringIO()
translator = gl.Lua_Translator.translator_NodeVisitor(output)
ast_node = None
try:
if not isalambda(fn):
ast_node = ast.parse(inspect.getsource(fn))
translator.rename_function[fn.__name__] = "__lambda__transfer__"
except:
pass
try:
if ast_node == None:
print "Cannot translate. Trying again from byte code decompilation"
ast_node = meta.decompiler.decompile_func(fn)
translator.rename_function[""] = "__lambda__transfer__"
except:
pass
if ast_node == None:
raise ValueError("Unable to get source of function")
ftype = gl.Lua_Translator.FunctionType()
selftype = self.dtype()
if selftype == list:
ftype.input_type = tuple([[]])
elif selftype == dict:
ftype.input_type = tuple([{}])
elif selftype == array.array:
ftype.input_type = tuple([[float]])
else:
ftype.input_type = tuple([selftype])
translator.function_known_types["__lambda__transfer__"] = ftype
translator.translate_ast(ast_node)
print "Lua Translation Success"
print output.getvalue()
fn = "LUA" + output.getvalue()
except Exception as e:
print traceback.format_exc()
print "Lua Translation Failed"
print e
except:
print traceback.format_exc()
print "Lua Translation Failed"
with cython_context():
return SArray(_proxy=self.__proxy__.transform(fn, dtype, skip_undefined, seed))
def filter(self, fn, skip_undefined=True, seed=None):
"""
Filter this SArray by a function.
Returns a new SArray filtered by the given function. If `fn` evaluates an
element to true, this element is copied to the new SArray. If not, it
isn't. Throws an exception if the return type of `fn` is not castable
to a boolean value.
Parameters
----------
fn : function
Function that filters the SArray. Must evaluate to bool or int.
skip_undefined : bool, optional
If True, will not apply fn to any undefined values.
seed : int, optional
Used as the seed if a random number generator is included in fn.
Returns
-------
out : SArray
The SArray filtered by fn. Each element of the SArray is of
the same type as the original SArray.
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.filter(lambda x: x < 3)
dtype: int
Rows: 2
[1, 2]
"""
assert inspect.isfunction(fn), "Input must be a function"
if not seed:
seed = time.time()
_mt._get_metric_tracker().track('sarray.filter')
with cython_context():
return SArray(_proxy=self.__proxy__.filter(fn, skip_undefined, seed))
def sample(self, fraction, seed=None):
"""
Create an SArray which contains a subsample of the current SArray.
Parameters
----------
fraction : float
The fraction of the rows to fetch. Must be between 0 and 1.
seed : int
The random seed for the random number generator.
Returns
-------
out : SArray
The new SArray which contains the subsampled rows.
Examples
--------
>>> sa = graphlab.SArray(range(10))
>>> sa.sample(.3)
dtype: int
Rows: 3
[2, 6, 9]
"""
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
if (self.size() == 0):
return SArray()
if not seed:
seed = time.time()
_mt._get_metric_tracker().track('sarray.sample')
with cython_context():
return SArray(_proxy=self.__proxy__.sample(fraction, seed))
def _save_as_text(self, url):
"""
Save the SArray to disk as a text file.
"""
raise NotImplementedError
def all(self):
"""
Return True if every element of the SArray evaluates to True. For
numeric SArrays zeros and missing values (``None``) evaluate to False,
while all non-zero, non-missing values evaluate to True. For string,
list, and dictionary SArrays, empty values (zero length strings, lists
or dictionaries) or missing values (``None``) evaluate to False. All
other values evaluate to True.
Returns True on an empty SArray.
Returns
-------
out : bool
See Also
--------
any
Examples
--------
>>> graphlab.SArray([1, None]).all()
False
>>> graphlab.SArray([1, 0]).all()
False
>>> graphlab.SArray([1, 2]).all()
True
>>> graphlab.SArray(["hello", "world"]).all()
True
>>> graphlab.SArray(["hello", ""]).all()
False
>>> graphlab.SArray([]).all()
True
"""
with cython_context():
return self.__proxy__.all()
def any(self):
"""
Return True if any element of the SArray evaluates to True. For numeric
SArrays any non-zero value evaluates to True. For string, list, and
dictionary SArrays, any element of non-zero length evaluates to True.
Returns False on an empty SArray.
Returns
-------
out : bool
See Also
--------
all
Examples
--------
>>> graphlab.SArray([1, None]).any()
True
>>> graphlab.SArray([1, 0]).any()
True
>>> graphlab.SArray([0, 0]).any()
False
>>> graphlab.SArray(["hello", "world"]).any()
True
>>> graphlab.SArray(["hello", ""]).any()
True
>>> graphlab.SArray(["", ""]).any()
False
>>> graphlab.SArray([]).any()
False
"""
with cython_context():
return self.__proxy__.any()
def max(self):
"""
Get maximum numeric value in SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type.
Returns
-------
out : type of SArray
Maximum value of SArray
See Also
--------
min
Examples
--------
>>> graphlab.SArray([14, 62, 83, 72, 77, 96, 5, 25, 69, 66]).max()
96
"""
with cython_context():
return self.__proxy__.max()
def min(self):
"""
Get minimum numeric value in SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type.
Returns
-------
out : type of SArray
Minimum value of SArray
See Also
--------
max
Examples
--------
>>> graphlab.SArray([14, 62, 83, 72, 77, 96, 5, 25, 69, 66]).min()
5
"""
with cython_context():
return self.__proxy__.min()
def sum(self):
"""
Sum of all values in this SArray.
Raises an exception if called on an SArray of strings, lists, or
dictionaries. If the SArray contains numeric arrays (array.array) and
all the arrays are the same length, the sum over all the arrays will be
returned. Returns None on an empty SArray. For large values, this may
overflow without warning.
Returns
-------
out : type of SArray
Sum of all values in SArray
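Examples
--------
Illustrative usage (a minimal sketch; the exact output formatting may vary):
>>> graphlab.SArray([1, 2, 3]).sum()
6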
"""
with cython_context():
return self.__proxy__.sum()
def mean(self):
"""
Mean of all the values in the SArray, or mean image.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type or non-Image type.
Returns
-------
out : float | graphlab.Image
Mean of all values in SArray, or image holding per-pixel mean
across the input SArray.
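Examples
--------
A small illustrative example (not exhaustive):
>>> graphlab.SArray([1, 2, 3, 4]).mean()
2.5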
"""
with cython_context():
if self.dtype() == gl.Image:
import graphlab.extensions as extensions
return extensions.generate_mean(self)
else:
return self.__proxy__.mean()
def std(self, ddof=0):
"""
Standard deviation of all the values in the SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type or if `ddof` >= length of SArray.
Parameters
----------
ddof : int, optional
"delta degrees of freedom" in the variance calculation.
Returns
-------
out : float
The standard deviation of all the values.
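Examples
--------
Illustrative example, assuming the default ``ddof=0``:
>>> graphlab.SArray([2, 4, 4, 4, 5, 5, 7, 9]).std()
2.0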
"""
with cython_context():
return self.__proxy__.std(ddof)
def var(self, ddof=0):
"""
Variance of all the values in the SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type or if `ddof` >= length of SArray.
Parameters
----------
ddof : int, optional
"delta degrees of freedom" in the variance calculation.
Returns
-------
out : float
Variance of all values in SArray.
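Examples
--------
Illustrative example, again assuming the default ``ddof=0``:
>>> graphlab.SArray([2, 4, 4, 4, 5, 5, 7, 9]).var()
4.0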
"""
with cython_context():
return self.__proxy__.var(ddof)
def num_missing(self):
"""
Number of missing elements in the SArray.
Returns
-------
out : int
Number of missing values.
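Examples
--------
A minimal illustration:
>>> graphlab.SArray([1, None, 3, None]).num_missing()
2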
"""
with cython_context():
return self.__proxy__.num_missing()
def nnz(self):
"""
Number of non-zero elements in the SArray.
Returns
-------
out : int
Number of non-zero elements.
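Examples
--------
A minimal illustration:
>>> graphlab.SArray([0, 1, 0, 2]).nnz()
2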
"""
with cython_context():
return self.__proxy__.nnz()
def datetime_to_str(self,str_format="%Y-%m-%dT%H:%M:%S%ZP"):
"""
Create a new SArray with all the values cast to str. The string format is
specified by the 'str_format' parameter.
Parameters
----------
str_format : str
The format to output the string. Default format is "%Y-%m-%dT%H:%M:%S%ZP".
Returns
-------
out : SArray[str]
The SArray converted to the type 'str'.
Examples
--------
>>> dt = datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5))
>>> sa = graphlab.SArray([dt])
>>> sa.datetime_to_str("%e %b %Y %T %ZP")
dtype: str
Rows: 1
[20 Oct 2011 09:30:10 GMT-05:00]
See Also
----------
str_to_datetime
References
----------
[1] Boost date time from string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html)
"""
if(self.dtype() != datetime.datetime):
raise TypeError("datetime_to_str expects SArray of datetime as input SArray")
_mt._get_metric_tracker().track('sarray.datetime_to_str')
with cython_context():
return SArray(_proxy=self.__proxy__.datetime_to_str(str_format))
def str_to_datetime(self,str_format="%Y-%m-%dT%H:%M:%S%ZP"):
"""
Create a new SArray with all the values cast to datetime. The string format is
specified by the 'str_format' parameter.
Parameters
----------
str_format : str
The string format of the input SArray. Default format is "%Y-%m-%dT%H:%M:%S%ZP".
Returns
-------
out : SArray[datetime.datetime]
The SArray converted to the type 'datetime'.
Examples
--------
>>> sa = graphlab.SArray(["20-Oct-2011 09:30:10 GMT-05:30"])
>>> sa.str_to_datetime("%d-%b-%Y %H:%M:%S %ZP")
dtype: datetime
Rows: 1
datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5.5))
See Also
----------
datetime_to_str
References
----------
[1] boost date time to string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html)
"""
if(self.dtype() != str):
raise TypeError("str_to_datetime expects SArray of str as input SArray")
_mt._get_metric_tracker().track('sarray.str_to_datetime')
with cython_context():
return SArray(_proxy=self.__proxy__.str_to_datetime(str_format))
def pixel_array_to_image(self, width, height, channels, undefined_on_failure=True, allow_rounding=False):
"""
Create a new SArray with all the values cast to :py:class:`graphlab.image.Image`
of uniform size.
Parameters
----------
width: int
The width of the new images.
height: int
The height of the new images.
channels: int.
Number of channels of the new images.
undefined_on_failure: bool , optional , default True
If True, return None type instead of Image type in failure instances.
If False, raises error upon failure.
allow_rounding: bool, optional , default False
If True, rounds non-integer values when converting to Image type.
If False, raises error upon rounding.
Returns
-------
out : SArray[graphlab.Image]
The SArray converted to the type 'graphlab.Image'.
See Also
--------
astype, str_to_datetime, datetime_to_str
Examples
--------
The MNIST data is scaled from 0 to 1, but our image type only loads integer pixel values
from 0 to 255. If we just convert without scaling, all values below one would be cast to
0.
>>> mnist_array = graphlab.SArray('http://s3.amazonaws.com/dato-datasets/mnist/mnist_vec_sarray')
>>> scaled_mnist_array = mnist_array * 255
>>> mnist_img_sarray = gl.SArray.pixel_array_to_image(scaled_mnist_array, 28, 28, 1, allow_rounding = True)
"""
if(self.dtype() != array.array):
raise TypeError("array_to_img expects SArray of arrays as input SArray")
num_to_test = 10
num_test = min(self.size(), num_to_test)
mod_values = [val % 1 for x in range(num_test) for val in self[x]]
out_of_range_values = [(val > 255 or val < 0) for x in range(num_test) for val in self[x]]
if sum(mod_values) != 0.0 and not allow_rounding:
raise ValueError("There are non-integer values in the array data. Images only support integer data values between 0 and 255. To permit rounding, set the 'allow_rounding' paramter to 1.")
if sum(out_of_range_values) != 0:
raise ValueError("There are values outside the range of 0 to 255. Images only support integer data values between 0 and 255.")
_mt._get_metric_tracker().track('sarray.pixel_array_to_img')
import graphlab.extensions as extensions
return extensions.vector_sarray_to_image_sarray(self, width, height, channels, undefined_on_failure)
def _head_str(self, num_rows):
"""
Takes the head of the SArray cast to string.
"""
import graphlab.extensions as extensions
return extensions._head_str(self, num_rows)
def astype(self, dtype, undefined_on_failure=False):
"""
Create a new SArray with all values cast to the given type. Throws an
exception if the types are not castable to the given type.
Parameters
----------
dtype : {int, float, str, list, array.array, dict, datetime.datetime}
The type to cast the elements to in SArray
undefined_on_failure: bool, optional
If set to True, runtime cast failures will be emitted as missing
values rather than failing.
Returns
-------
out : SArray [dtype]
The SArray converted to the type ``dtype``.
Notes
-----
- The string parsing techniques used to handle conversion to dictionary
and list types are quite generic and permit a variety of interesting
formats to be interpreted. For instance, a JSON string can usually be
interpreted as a list or a dictionary type. See the examples below.
- For datetime-to-string and string-to-datetime conversions,
use sa.datetime_to_str() and sa.str_to_datetime() functions.
- For array.array to graphlab.Image conversions, use sa.pixel_array_to_image()
Examples
--------
>>> sa = graphlab.SArray(['1','2','3','4'])
>>> sa.astype(int)
dtype: int
Rows: 4
[1, 2, 3, 4]
Given an SArray of strings that look like dicts, convert to a dictionary
type:
>>> sa = graphlab.SArray(['{1:2 3:4}', '{a:b c:d}'])
>>> sa.astype(dict)
dtype: dict
Rows: 2
[{1: 2, 3: 4}, {'a': 'b', 'c': 'd'}]
"""
_mt._get_metric_tracker().track('sarray.astype.%s' % str(dtype.__name__))
if (dtype == gl.Image) and (self.dtype() == array.array):
raise TypeError("Cannot cast from image type to array with sarray.astype(). Please use sarray.array_to_img() instead.")
with cython_context():
return SArray(_proxy=self.__proxy__.astype(dtype, undefined_on_failure))
def clip(self, lower=float('nan'), upper=float('nan')):
"""
Create a new SArray with each value clipped to be within the given
bounds.
In this case, "clipped" means that values below the lower bound will be
set to the lower bound value. Values above the upper bound will be set
to the upper bound value. This function can operate on SArrays of
numeric type as well as array type, in which case each individual
element in each array is clipped. By default ``lower`` and ``upper`` are
set to ``float('nan')`` which indicates the respective bound should be
ignored. The method fails if invoked on an SArray of non-numeric type.
Parameters
----------
lower : int, optional
The lower bound used to clip. Ignored if equal to ``float('nan')``
(the default).
upper : int, optional
The upper bound used to clip. Ignored if equal to ``float('nan')``
(the default).
Returns
-------
out : SArray
See Also
--------
clip_lower, clip_upper
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.clip(2,2)
dtype: int
Rows: 3
[2, 2, 2]
"""
with cython_context():
return SArray(_proxy=self.__proxy__.clip(lower, upper))
def clip_lower(self, threshold):
"""
Create new SArray with all values clipped to the given lower bound. This
function can operate on numeric arrays, as well as vector arrays, in
which case each individual element in each vector is clipped. Throws an
exception if the SArray is empty or the types are non-numeric.
Parameters
----------
threshold : float
The lower bound used to clip values.
Returns
-------
out : SArray
See Also
--------
clip, clip_upper
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.clip_lower(2)
dtype: int
Rows: 3
[2, 2, 3]
"""
with cython_context():
return SArray(_proxy=self.__proxy__.clip(threshold, float('nan')))
def clip_upper(self, threshold):
"""
Create new SArray with all values clipped to the given upper bound. This
function can operate on numeric arrays, as well as vector arrays, in
which case each individual element in each vector is clipped.
Parameters
----------
threshold : float
The upper bound used to clip values.
Returns
-------
out : SArray
See Also
--------
clip, clip_lower
Examples
--------
>>> sa = graphlab.SArray([1,2,3])
>>> sa.clip_upper(2)
dtype: int
Rows: 3
[1, 2, 2]
"""
with cython_context():
return SArray(_proxy=self.__proxy__.clip(float('nan'), threshold))
def tail(self, n=10):
"""
Get an SArray that contains the last n elements in the SArray.
Parameters
----------
n : int
The number of elements to fetch
Returns
-------
out : SArray
A new SArray which contains the last n rows of the current SArray.
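Examples
--------
Illustrative usage (output formatting follows the usual SArray repr):
>>> graphlab.SArray(range(10)).tail(3)
dtype: int
Rows: 3
[7, 8, 9]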
"""
with cython_context():
return SArray(_proxy=self.__proxy__.tail(n))
def dropna(self):
"""
Create new SArray containing only the non-missing values of the
SArray.
A missing value shows up in an SArray as 'None'. This will also drop
float('nan').
Returns
-------
out : SArray
The new SArray with missing values removed.
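Examples
--------
A small sketch of typical usage:
>>> graphlab.SArray([1, None, 3]).dropna()
dtype: int
Rows: 2
[1, 3]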
"""
_mt._get_metric_tracker().track('sarray.dropna')
with cython_context():
return SArray(_proxy = self.__proxy__.drop_missing_values())
def fillna(self, value):
"""
Create new SArray with all missing values (None or NaN) filled in
with the given value.
The size of the new SArray will be the same as the original SArray. If
the given value is not the same type as the values in the SArray,
`fillna` will attempt to convert the value to the original SArray's
type. If this fails, an error will be raised.
Parameters
----------
value : type convertible to SArray's type
The value used to replace all missing values
Returns
-------
out : SArray
A new SArray with all missing values filled
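Examples
--------
A small sketch of typical usage:
>>> graphlab.SArray([1, None, 3]).fillna(0)
dtype: int
Rows: 3
[1, 0, 3]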
"""
_mt._get_metric_tracker().track('sarray.fillna')
with cython_context():
return SArray(_proxy = self.__proxy__.fill_missing_values(value))
def topk_index(self, topk=10, reverse=False):
"""
Create an SArray indicating which elements are in the top k.
Entries are '1' if the corresponding element in the current SArray is a
part of the top k elements, and '0' if that corresponding element is
not. Order is descending by default.
Parameters
----------
topk : int
The number of elements to determine if 'top'
reverse: bool
If True, return the topk elements in ascending order
Returns
-------
out : SArray (of type int)
Notes
-----
This is used internally by SFrame's topk function.
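Examples
--------
Illustrative sketch (the indicator marks the two largest values):
>>> graphlab.SArray([10, 5, 30, 20]).topk_index(topk=2)
dtype: int
Rows: 4
[0, 0, 1, 1]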
"""
with cython_context():
return SArray(_proxy = self.__proxy__.topk_index(topk, reverse))
def sketch_summary(self, background=False, sub_sketch_keys=None):
"""
Summary statistics that can be calculated with one pass over the SArray.
Returns a graphlab.Sketch object which can be further queried for many
descriptive statistics over this SArray. Many of the statistics are
approximate. See the :class:`~graphlab.Sketch` documentation for more
detail.
Parameters
----------
background : boolean, optional
If True, the sketch construction will return immediately and the
sketch will be constructed in the background. While this is going on,
the sketch can be queried incrementally, but at a performance penalty.
Defaults to False.
sub_sketch_keys: int | str | list of int | list of str, optional
For SArray of dict type, also constructs sketches for a given set of keys.
For SArray of array type, also constructs sketches for the given indexes.
The sub sketches may be queried using:
:py:func:`~graphlab.Sketch.element_sub_sketch()`
Defaults to None in which case no subsketches will be constructed.
Returns
-------
out : Sketch
Sketch object that contains descriptive statistics for this SArray.
Many of the statistics are approximate.
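Examples
--------
A minimal sketch of usage; the queries available on the returned object
(e.g. ``mean()``) are described in the Sketch documentation:
>>> sa = graphlab.SArray([1, 2, 3, 4, 5])
>>> sketch = sa.sketch_summary()
>>> sketch.mean()
3.0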
"""
from graphlab.data_structures.sketch import Sketch
if (self.dtype() == gl.data_structures.image.Image):
raise TypeError("sketch_summary() is not supported for arrays of image type")
if (type(background) != bool):
raise TypeError("'background' parameter has to be a boolean value")
if (sub_sketch_keys != None):
if (self.dtype() != dict and self.dtype() != array.array):
raise TypeError("sub_sketch_keys is only supported for SArray of dictionary or array type")
if not hasattr(sub_sketch_keys, "__iter__"):
sub_sketch_keys = [sub_sketch_keys]
value_types = set([type(i) for i in sub_sketch_keys])
if (len(value_types) != 1):
raise ValueError("sub_sketch_keys member values need to have the same type.")
value_type = value_types.pop();
if (self.dtype() == dict and value_type != str):
raise TypeError("Only string value(s) can be passed to sub_sketch_keys for SArray of dictionary type. "+
"For dictionary types, sketch summary is computed by casting keys to string values.")
if (self.dtype() == array.array and value_type != int):
raise TypeError("Only int value(s) can be passed to sub_sketch_keys for SArray of array type")
else:
sub_sketch_keys = list()
_mt._get_metric_tracker().track('sarray.sketch_summary')
return Sketch(self, background, sub_sketch_keys = sub_sketch_keys)
def append(self, other):
"""
Append an SArray to the current SArray. Creates a new SArray with the
rows from both SArrays. Both SArrays must be of the same type.
Parameters
----------
other : SArray
Another SArray whose rows are appended to current SArray.
Returns
-------
out : SArray
A new SArray that contains rows from both SArrays, with rows from
the ``other`` SArray coming after all rows from the current SArray.
See Also
--------
SFrame.append
Examples
--------
>>> sa = graphlab.SArray([1, 2, 3])
>>> sa2 = graphlab.SArray([4, 5, 6])
>>> sa.append(sa2)
dtype: int
Rows: 6
[1, 2, 3, 4, 5, 6]
"""
_mt._get_metric_tracker().track('sarray.append')
if type(other) is not SArray:
raise RuntimeError("SArray append can only work with SArray")
if self.dtype() != other.dtype():
raise RuntimeError("Data types in both SArrays have to be the same")
with cython_context():
other.__materialize__()
return SArray(_proxy = self.__proxy__.append(other.__proxy__))
def unique(self):
"""
Get all unique values in the current SArray.
Raises a TypeError if the SArray is of dictionary type. Will not
necessarily preserve the order of the given SArray in the new SArray.
Returns
-------
out : SArray
A new SArray that contains the unique values of the current SArray.
See Also
--------
SFrame.unique
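Examples
--------
Illustrative usage; ``sorted`` is applied only to make the output
deterministic, since element order is not guaranteed:
>>> sorted(graphlab.SArray([1, 2, 2, 3, 3, 3]).unique())
[1, 2, 3]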
"""
_mt._get_metric_tracker().track('sarray.unique')
tmp_sf = gl.SFrame()
tmp_sf.add_column(self, 'X1')
res = tmp_sf.groupby('X1',{})
return SArray(_proxy=res['X1'].__proxy__)
@gl._check_canvas_enabled
def show(self, view=None):
"""
show(view=None)
Visualize the SArray with GraphLab Create :mod:`~graphlab.canvas`. This function starts Canvas
if it is not already running. If the SArray has already been plotted,
this function will update the plot.
Parameters
----------
view : str, optional
The name of the SFrame view to show. Can be one of:
- None: Use the default (depends on the dtype of the SArray).
- 'Categorical': Shows most frequent items in this SArray, sorted
by frequency. Only valid for str, int, or float dtypes.
- 'Numeric': Shows a histogram (distribution of values) for the
SArray. Only valid for int or float dtypes.
- 'Dictionary': Shows a cross filterable list of keys (categorical)
and values (categorical or numeric). Only valid for dict dtype.
- 'Array': Shows a Numeric view, filterable by sub-column (index).
Only valid for array.array dtype.
- 'List': Shows a Categorical view, aggregated across all sub-
columns (indices). Only valid for list dtype.
Returns
-------
view : graphlab.canvas.view.View
An object representing the GraphLab Canvas view
See Also
--------
canvas
Examples
--------
Suppose 'sa' is an SArray, we can view it in GraphLab Canvas using:
>>> sa.show()
If 'sa' is a numeric (int or float) SArray, we can view it as
a categorical variable using:
>>> sa.show(view='Categorical')
"""
import graphlab.canvas
import graphlab.canvas.inspect
import graphlab.canvas.views.sarray
graphlab.canvas.inspect.find_vars(self)
return graphlab.canvas.show(graphlab.canvas.views.sarray.SArrayView(self, params={
'view': view
}))
def item_length(self):
"""
Length of each element in the current SArray.
Only works on SArrays of dict, array, or list type. If a given element
is a missing value, then the output element is also a missing value.
This function is equivalent to the following but more performant:
sa_item_len = sa.apply(lambda x: len(x) if x is not None else None)
Returns
-------
out_sf : SArray
A new SArray, each element in the SArray is the len of the corresponding
items in original SArray.
Examples
--------
>>> sa = SArray([
... {"is_restaurant": 1, "is_electronics": 0},
... {"is_restaurant": 1, "is_retail": 1, "is_electronics": 0},
... {"is_restaurant": 0, "is_retail": 1, "is_electronics": 0},
... {"is_restaurant": 0},
... {"is_restaurant": 1, "is_electronics": 1},
... None])
>>> sa.item_length()
dtype: int
Rows: 6
[2, 3, 3, 1, 2, None]
"""
if (self.dtype() not in [list, dict, array.array]):
raise TypeError("item_length() is only applicable for SArray of type list, dict and array.")
_mt._get_metric_tracker().track('sarray.item_length')
with cython_context():
return SArray(_proxy = self.__proxy__.item_length())
def split_datetime(self, column_name_prefix = "X", limit=None, tzone=False):
"""
Splits an SArray of datetime type into multiple columns, returning a
new SFrame that contains the expanded columns. An SArray of datetime will be
split by default into an SFrame of 6 columns, one for each
year/month/day/hour/minute/second element.
column naming:
When splitting a SArray of datetime type, new columns are named:
prefix.year, prefix.month, etc. The prefix is set by the parameter
"column_name_prefix" and defaults to 'X'. If column_name_prefix is
None or empty, then no prefix is used.
Timezone column:
If the tzone parameter is True, then timezone information is represented
as one additional column: a float giving the offset in hours from
GMT/UTC.
Parameters
----------
column_name_prefix: str, optional
If provided, expanded column names would start with the given prefix.
Defaults to "X".
limit: list[str], optional
Limits the set of datetime elements to expand.
Elements are 'year','month','day','hour','minute',
and 'second'.
tzone: bool, optional
A boolean parameter that determines whether to show timezone column or not.
Defaults to False.
Returns
-------
out : SFrame
A new SFrame that contains all expanded columns
Examples
--------
To expand only day and year elements of a datetime SArray
>>> sa = SArray(
[datetime(2011, 1, 21, 7, 7, 21, tzinfo=GMT(0)),
datetime(2010, 2, 5, 7, 8, 21, tzinfo=GMT(4.5))])
>>> sa.split_datetime(column_name_prefix=None,limit=['day','year'])
Columns:
day int
year int
Rows: 2
Data:
+-------+--------+
| day | year |
+-------+--------+
| 21 | 2011 |
| 5 | 2010 |
+-------+--------+
[2 rows x 2 columns]
To expand only year and tzone elements of a datetime SArray
with tzone column represented as a string. Columns are named with prefix:
'Y.column_name'.
>>> sa.split_datetime(column_name_prefix="Y",limit=['year'],tzone=True)
Columns:
Y.year int
Y.tzone float
Rows: 2
Data:
+----------+---------+
| Y.year | Y.tzone |
+----------+---------+
| 2011 | 0.0 |
| 2010 | 4.5 |
+----------+---------+
[2 rows x 2 columns]
"""
if self.dtype() != datetime.datetime:
raise TypeError("Only column of datetime type is supported.")
if column_name_prefix == None:
column_name_prefix = ""
if type(column_name_prefix) != str:
raise TypeError("'column_name_prefix' must be a string")
# convert limit to column_keys
if limit != None:
if (not hasattr(limit, '__iter__')):
raise TypeError("'limit' must be a list");
name_types = set([type(i) for i in limit])
if (len(name_types) != 1):
raise TypeError("'limit' contains values that are different types")
if (name_types.pop() != str):
raise TypeError("'limit' must contain string values.")
if len(set(limit)) != len(limit):
raise ValueError("'limit' contains duplicate values")
column_types = []
if(limit != None):
column_types = list()
for i in limit:
column_types.append(int);
else:
limit = ['year','month','day','hour','minute','second']
column_types = [int, int, int, int, int, int]
if(tzone == True):
limit += ['tzone']
column_types += [float]
_mt._get_metric_tracker().track('sarray.split_datetime')
with cython_context():
return gl.SFrame(_proxy=self.__proxy__.expand(column_name_prefix, limit, column_types))
def unpack(self, column_name_prefix = "X", column_types=None, na_value=None, limit=None):
"""
Convert an SArray of list, array, or dict type to an SFrame with
multiple columns.
`unpack` expands an SArray using the values of each list/array/dict as
elements in a new SFrame of multiple columns. For example, an SArray of
lists each of length 4 will be expanded into an SFrame of 4 columns,
one for each list element. An SArray of lists/arrays of varying size
will be expanded to a number of columns equal to the longest list/array.
An SArray of dictionaries will be expanded into as many columns as
there are keys.
When unpacking an SArray of list or array type, new columns are named:
`column_name_prefix`.0, `column_name_prefix`.1, etc. If unpacking a
column of dict type, unpacked columns are named
`column_name_prefix`.key1, `column_name_prefix`.key2, etc.
When unpacking an SArray of list or dictionary types, missing values in
the original element remain as missing values in the resultant columns.
If the `na_value` parameter is specified, all values equal to this
given value are also replaced with missing values. In an SArray of
array.array type, NaN is interpreted as a missing value.
:py:func:`graphlab.SFrame.pack_columns()` is the reverse effect of unpack
Parameters
----------
column_name_prefix: str, optional
If provided, unpacked column names would start with the given prefix.
column_types: list[type], optional
Column types for the unpacked columns. If not provided, column
types are automatically inferred from first 100 rows. Defaults to
None.
na_value: optional
Convert all values that are equal to `na_value` to
missing value if specified.
limit: list, optional
Limits the set of list/array/dict keys to unpack.
For list/array SArrays, 'limit' must contain integer indices.
For dict SArray, 'limit' must contain dictionary keys.
Returns
-------
out : SFrame
A new SFrame that contains all unpacked columns
Examples
--------
To unpack a dict SArray
>>> sa = SArray([{ 'word': 'a', 'count': 1},
... { 'word': 'cat', 'count': 2},
... { 'word': 'is', 'count': 3},
... { 'word': 'coming','count': 4}])
Normal case of unpacking SArray of type dict:
>>> sa.unpack(column_name_prefix=None)
Columns:
count int
word str
<BLANKLINE>
Rows: 4
<BLANKLINE>
Data:
+-------+--------+
| count | word |
+-------+--------+
| 1 | a |
| 2 | cat |
| 3 | is |
| 4 | coming |
+-------+--------+
[4 rows x 2 columns]
<BLANKLINE>
Unpack only keys with 'word':
>>> sa.unpack(limit=['word'])
Columns:
X.word str
<BLANKLINE>
Rows: 4
<BLANKLINE>
Data:
+--------+
| X.word |
+--------+
| a |
| cat |
| is |
| coming |
+--------+
[4 rows x 1 columns]
<BLANKLINE>
>>> sa2 = SArray([
... [1, 0, 1],
... [1, 1, 1],
... [0, 1]])
Convert all zeros to missing values:
>>> sa2.unpack(column_types=[int, int, int], na_value=0)
Columns:
X.0 int
X.1 int
X.2 int
<BLANKLINE>
Rows: 3
<BLANKLINE>
Data:
+------+------+------+
| X.0 | X.1 | X.2 |
+------+------+------+
| 1 | None | 1 |
| 1 | 1 | 1 |
| None | 1 | None |
+------+------+------+
[3 rows x 3 columns]
<BLANKLINE>
"""
if self.dtype() not in [dict, array.array, list]:
raise TypeError("Only SArray of dict/list/array type supports unpack")
if column_name_prefix == None:
column_name_prefix = ""
if type(column_name_prefix) != str:
raise TypeError("'column_name_prefix' must be a string")
# validate 'limit'
if limit != None:
if (not hasattr(limit, '__iter__')):
raise TypeError("'limit' must be a list");
name_types = set([type(i) for i in limit])
if (len(name_types) != 1):
raise TypeError("'limit' contains values that are different types")
# limit value should be numeric if unpacking sarray.array value
if (self.dtype() != dict) and (name_types.pop() != int):
raise TypeError("'limit' must contain integer values.")
if len(set(limit)) != len(limit):
raise ValueError("'limit' contains duplicate values")
if (column_types != None):
if not hasattr(column_types, '__iter__'):
raise TypeError("column_types must be a list");
for column_type in column_types:
if (column_type not in (int, float, str, list, dict, array.array)):
raise TypeError("column_types contains unsupported types. Supported types are ['float', 'int', 'list', 'dict', 'str', 'array.array']")
if limit != None:
if len(limit) != len(column_types):
raise ValueError("limit and column_types do not have the same length")
elif self.dtype() == dict:
raise ValueError("if 'column_types' is given, 'limit' has to be provided to unpack dict type.")
else:
limit = range(len(column_types))
else:
head_rows = self.head(100).dropna()
lengths = [len(i) for i in head_rows]
if len(lengths) == 0 or max(lengths) == 0:
raise RuntimeError("Cannot infer number of items from the SArray, SArray may be empty. please explicitly provide column types")
# infer column types for dict type at server side, for list and array, infer from client side
if self.dtype() != dict:
length = max(lengths)
if limit == None:
limit = range(length)
else:
# adjust the length
length = len(limit)
if self.dtype() == array.array:
column_types = [float for i in range(length)]
else:
column_types = list()
for i in limit:
t = [(x[i] if ((x is not None) and len(x) > i) else None) for x in head_rows]
column_types.append(infer_type_of_list(t))
_mt._get_metric_tracker().track('sarray.unpack')
with cython_context():
if (self.dtype() == dict and column_types == None):
limit = limit if limit != None else []
return gl.SFrame(_proxy=self.__proxy__.unpack_dict(column_name_prefix, limit, na_value))
else:
return gl.SFrame(_proxy=self.__proxy__.unpack(column_name_prefix, limit, column_types, na_value))
def sort(self, ascending=True):
"""
Sort all values in this SArray.
Sort only works for SArrays of type str, int, float, and datetime.datetime;
otherwise a TypeError will be raised. Creates a new, sorted SArray.
Parameters
----------
ascending: boolean, optional
If True, the SArray values are sorted in ascending order; otherwise,
in descending order.
Returns
-------
out: SArray
Examples
--------
>>> sa = SArray([3,2,1])
>>> sa.sort()
dtype: int
Rows: 3
[1, 2, 3]
"""
if self.dtype() not in (int, float, str, datetime.datetime):
raise TypeError("Only sarray with type (int, float, str, datetime.datetime) can be sorted")
sf = gl.SFrame()
sf['a'] = self
return sf.sort('a', ascending)['a']
| agpl-3.0 |