repo_name: stringlengths 6-112 | path: stringlengths 4-204 | copies: stringlengths 1-3 | size: stringlengths 4-6 | content: stringlengths 714-810k | license: stringclasses 15 values |
---|---|---|---|---|---|
bsipocz/seaborn | seaborn/tests/test_palettes.py | 22 | 9613 | import colorsys
import numpy as np
import matplotlib as mpl
import nose.tools as nt
import numpy.testing as npt
from .. import palettes, utils, rcmod, husl
from ..xkcd_rgb import xkcd_rgb
from ..crayons import crayons
class TestColorPalettes(object):
def test_current_palette(self):
pal = palettes.color_palette(["red", "blue", "green"], 3)
rcmod.set_palette(pal, 3)
nt.assert_equal(pal, mpl.rcParams["axes.color_cycle"])
rcmod.set()
def test_palette_context(self):
default_pal = palettes.color_palette()
context_pal = palettes.color_palette("muted")
with palettes.color_palette(context_pal):
nt.assert_equal(mpl.rcParams["axes.color_cycle"], context_pal)
nt.assert_equal(mpl.rcParams["axes.color_cycle"], default_pal)
def test_big_palette_context(self):
original_pal = palettes.color_palette("deep", n_colors=8)
context_pal = palettes.color_palette("husl", 10)
rcmod.set_palette(original_pal)
with palettes.color_palette(context_pal, 10):
nt.assert_equal(mpl.rcParams["axes.color_cycle"], context_pal)
nt.assert_equal(mpl.rcParams["axes.color_cycle"], original_pal)
# Reset default
rcmod.set()
def test_seaborn_palettes(self):
pals = "deep", "muted", "pastel", "bright", "dark", "colorblind"
for name in pals:
pal_out = palettes.color_palette(name)
nt.assert_equal(len(pal_out), 6)
def test_hls_palette(self):
hls_pal1 = palettes.hls_palette()
hls_pal2 = palettes.color_palette("hls")
npt.assert_array_equal(hls_pal1, hls_pal2)
def test_husl_palette(self):
husl_pal1 = palettes.husl_palette()
husl_pal2 = palettes.color_palette("husl")
npt.assert_array_equal(husl_pal1, husl_pal2)
def test_mpl_palette(self):
mpl_pal1 = palettes.mpl_palette("Reds")
mpl_pal2 = palettes.color_palette("Reds")
npt.assert_array_equal(mpl_pal1, mpl_pal2)
def test_mpl_dark_palette(self):
mpl_pal1 = palettes.mpl_palette("Blues_d")
mpl_pal2 = palettes.color_palette("Blues_d")
npt.assert_array_equal(mpl_pal1, mpl_pal2)
def test_bad_palette_name(self):
with nt.assert_raises(ValueError):
palettes.color_palette("IAmNotAPalette")
def test_terrible_palette_name(self):
with nt.assert_raises(ValueError):
palettes.color_palette("jet")
def test_bad_palette_colors(self):
pal = ["red", "blue", "iamnotacolor"]
with nt.assert_raises(ValueError):
palettes.color_palette(pal)
def test_palette_desat(self):
pal1 = palettes.husl_palette(6)
pal1 = [utils.desaturate(c, .5) for c in pal1]
pal2 = palettes.color_palette("husl", desat=.5)
npt.assert_array_equal(pal1, pal2)
def test_palette_is_list_of_tuples(self):
pal_in = np.array(["red", "blue", "green"])
pal_out = palettes.color_palette(pal_in, 3)
nt.assert_is_instance(pal_out, list)
nt.assert_is_instance(pal_out[0], tuple)
nt.assert_is_instance(pal_out[0][0], float)
nt.assert_equal(len(pal_out[0]), 3)
def test_palette_cycles(self):
deep = palettes.color_palette("deep")
double_deep = palettes.color_palette("deep", 12)
nt.assert_equal(double_deep, deep + deep)
def test_hls_values(self):
pal1 = palettes.hls_palette(6, h=0)
pal2 = palettes.hls_palette(6, h=.5)
pal2 = pal2[3:] + pal2[:3]
npt.assert_array_almost_equal(pal1, pal2)
pal_dark = palettes.hls_palette(5, l=.2)
pal_bright = palettes.hls_palette(5, l=.8)
npt.assert_array_less(list(map(sum, pal_dark)),
list(map(sum, pal_bright)))
pal_flat = palettes.hls_palette(5, s=.1)
pal_bold = palettes.hls_palette(5, s=.9)
npt.assert_array_less(list(map(np.std, pal_flat)),
list(map(np.std, pal_bold)))
def test_husl_values(self):
pal1 = palettes.husl_palette(6, h=0)
pal2 = palettes.husl_palette(6, h=.5)
pal2 = pal2[3:] + pal2[:3]
npt.assert_array_almost_equal(pal1, pal2)
pal_dark = palettes.husl_palette(5, l=.2)
pal_bright = palettes.husl_palette(5, l=.8)
npt.assert_array_less(list(map(sum, pal_dark)),
list(map(sum, pal_bright)))
pal_flat = palettes.husl_palette(5, s=.1)
pal_bold = palettes.husl_palette(5, s=.9)
npt.assert_array_less(list(map(np.std, pal_flat)),
list(map(np.std, pal_bold)))
def test_cbrewer_qual(self):
pal_short = palettes.mpl_palette("Set1", 4)
pal_long = palettes.mpl_palette("Set1", 6)
nt.assert_equal(pal_short, pal_long[:4])
pal_full = palettes.mpl_palette("Set2", 8)
pal_long = palettes.mpl_palette("Set2", 10)
nt.assert_equal(pal_full, pal_long[:8])
def test_mpl_reversal(self):
pal_forward = palettes.mpl_palette("BuPu", 6)
pal_reverse = palettes.mpl_palette("BuPu_r", 6)
nt.assert_equal(pal_forward, pal_reverse[::-1])
def test_rgb_from_hls(self):
color = .5, .8, .4
rgb_got = palettes._color_to_rgb(color, "hls")
rgb_want = colorsys.hls_to_rgb(*color)
nt.assert_equal(rgb_got, rgb_want)
def test_rgb_from_husl(self):
color = 120, 50, 40
rgb_got = palettes._color_to_rgb(color, "husl")
rgb_want = husl.husl_to_rgb(*color)
nt.assert_equal(rgb_got, rgb_want)
def test_rgb_from_xkcd(self):
color = "dull red"
rgb_got = palettes._color_to_rgb(color, "xkcd")
rgb_want = xkcd_rgb[color]
nt.assert_equal(rgb_got, rgb_want)
def test_light_palette(self):
pal_forward = palettes.light_palette("red")
pal_reverse = palettes.light_palette("red", reverse=True)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
red = tuple(mpl.colors.colorConverter.to_rgba("red"))
nt.assert_equal(tuple(pal_forward[-1]), red)
pal_cmap = palettes.light_palette("blue", as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_dark_palette(self):
pal_forward = palettes.dark_palette("red")
pal_reverse = palettes.dark_palette("red", reverse=True)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
red = tuple(mpl.colors.colorConverter.to_rgba("red"))
nt.assert_equal(tuple(pal_forward[-1]), red)
pal_cmap = palettes.dark_palette("blue", as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_blend_palette(self):
colors = ["red", "yellow", "white"]
pal_cmap = palettes.blend_palette(colors, as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_cubehelix_against_matplotlib(self):
x = np.linspace(0, 1, 8)
mpl_pal = mpl.cm.cubehelix(x)[:, :3].tolist()
sns_pal = palettes.cubehelix_palette(8, start=0.5, rot=-1.5, hue=1,
dark=0, light=1, reverse=True)
nt.assert_list_equal(sns_pal, mpl_pal)
def test_cubehelix_n_colors(self):
for n in [3, 5, 8]:
pal = palettes.cubehelix_palette(n)
nt.assert_equal(len(pal), n)
def test_cubehelix_reverse(self):
pal_forward = palettes.cubehelix_palette()
pal_reverse = palettes.cubehelix_palette(reverse=True)
nt.assert_list_equal(pal_forward, pal_reverse[::-1])
def test_cubehelix_cmap(self):
cmap = palettes.cubehelix_palette(as_cmap=True)
nt.assert_is_instance(cmap, mpl.colors.ListedColormap)
pal = palettes.cubehelix_palette()
x = np.linspace(0, 1, 6)
npt.assert_array_equal(cmap(x)[:, :3], pal)
cmap_rev = palettes.cubehelix_palette(as_cmap=True, reverse=True)
x = np.linspace(0, 1, 6)
pal_forward = cmap(x).tolist()
pal_reverse = cmap_rev(x[::-1]).tolist()
nt.assert_list_equal(pal_forward, pal_reverse)
def test_xkcd_palette(self):
names = list(xkcd_rgb.keys())[10:15]
colors = palettes.xkcd_palette(names)
for name, color in zip(names, colors):
as_hex = mpl.colors.rgb2hex(color)
nt.assert_equal(as_hex, xkcd_rgb[name])
def test_crayon_palette(self):
names = list(crayons.keys())[10:15]
colors = palettes.crayon_palette(names)
for name, color in zip(names, colors):
as_hex = mpl.colors.rgb2hex(color)
nt.assert_equal(as_hex, crayons[name].lower())
def test_color_codes(self):
palettes.set_color_codes("deep")
colors = palettes.color_palette("deep") + [".1"]
for code, color in zip("bgrmyck", colors):
rgb_want = mpl.colors.colorConverter.to_rgb(color)
rgb_got = mpl.colors.colorConverter.to_rgb(code)
nt.assert_equal(rgb_want, rgb_got)
palettes.set_color_codes("reset")
def test_as_hex(self):
pal = palettes.color_palette("deep")
for rgb, hex in zip(pal, pal.as_hex()):
nt.assert_equal(mpl.colors.rgb2hex(rgb), hex)
def test_preserved_palette_length(self):
pal_in = palettes.color_palette("Set1", 10)
pal_out = palettes.color_palette(pal_in)
nt.assert_equal(pal_in, pal_out)
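# Illustrative sketch (hypothetical helper, not an original test): a minimal check,
# assuming the palettes module imported above, that color_palette returns a plain
# list of 3-tuples of floats. The underscore name keeps it out of test collection.
def _sketch_palette_returns_rgb_tuples():
    pal = palettes.color_palette("husl", 4)
    assert len(pal) == 4
    assert all(len(rgb) == 3 for rgb in pal)
    return pal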
| bsd-3-clause |
caisq/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 39 | 32726 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.deprecation import deprecated
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([(k,
out_el_shape(v, n_classes[k]
if n_classes is not None and
k in n_classes else None))
for k, v in list(y_shape.items())])
return input_shape, output_shape, batch_size
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
y: numpy, pandas or Dask array or dictionary of the aforementioned. Also
supports iterables.
n_classes: number of classes. Must be None or the same type as y. If `y` is a
`dict` (or an iterable that returns dicts), `n_classes` must be a `dict` such
that `n_classes[key]` is the number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
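# Illustrative sketch (hypothetical, not part of the original module): feeding a small
# in-memory dataset through setup_train_data_feeder defined above. With plain numpy
# arrays the call dispatches to DataFeeder; the helper name is made up for illustration.
def _sketch_setup_train_data_feeder():
    x = np.random.rand(8, 3).astype(np.float32)
    y = np.random.randint(0, 2, size=8)
    feeder = setup_train_data_feeder(x, y, n_classes=2, batch_size=4)
    # For this configuration the feed params start at epoch 0, offset 0, batch_size 4.
    return feeder.get_feed_params()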
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
@deprecated(None, 'Please convert numpy dtypes explicitly.')
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element(s) of `data` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: One feature sample, which can be either an Nd numpy matrix of shape
`[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
input_dtype: DType of input (or dictionary of dtypes).
output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(
x, dict), y is not None and isinstance(y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else (dict(
[(k, check_array(v, v.dtype)) for k, v in list(y.items())])
if y_is_dict else check_array(y, y.dtype))
# self.n_classes is not None means we're converting raw target indices
# to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (
np.int64 if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
if x_is_dict else _check_dtype(self._x.dtype))
# self._output_dtype == np.float32 when y is None
self._output_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
if y_is_dict else (_check_dtype(self._y.dtype)
if y is not None else np.float32))
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
if x_is_dict:
num_samples = list(self._x.values())[0].shape[0]
elif tensor_util.is_tensor(self._x):
num_samples = self._x.shape[
0].value # shape will be a Dimension, extract an int
else:
num_samples = self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1))
if len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(
self._x.values())[0].shape[0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else {
self._input_placeholder.name:
extract(self._x, batch_indices)
})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = (self.output_shape, self._output_dtype,
self.n_classes)
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
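# Illustrative sketch (hypothetical, assuming a TF1-style graph is being built): wiring
# a DataFeeder to placeholders and pulling one feed_dict. Nothing here runs at import
# time, and the helper name is made up for illustration.
def _sketch_data_feeder_feed_dict():
    x = np.arange(12, dtype=np.float32).reshape(6, 2)
    y = np.array([0, 1, 0, 1, 0, 1])
    feeder = DataFeeder(x, y, n_classes=2, batch_size=3)
    input_ph, output_ph = feeder.input_builder()  # creates the tf placeholders
    feed_dict = feeder.get_feed_dict_fn()()       # one mini-batch keyed by placeholder names
    return input_ph, output_ph, feed_dict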
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Streaming data feeder allows reading data as it comes in from disk or
somewhere else. It is common to have these iterators rotate infinitely over
the dataset, so that the trainer side controls how much to learn.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can be
an Nd numpy matrix or a dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
an Nd numpy matrix or a dictionary of Nd numpy matrices with one or many
class / regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
batch_size: Mini batch size to accumulate samples in one batch. If set to
`None`, the iterator is assumed to return already-batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = (
[1] + list(y_first_el[0].shape
if isinstance(y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict(
[(k, np.zeros(shape[k], dtype[k])) for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
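# Illustrative sketch (hypothetical, not part of the original module): wrapping plain
# Python generators so samples are consumed lazily, one batch at a time. Generator and
# helper names are made up for illustration.
def _sketch_streaming_data_feeder():
    def x_gen():
        for i in range(4):
            yield np.array([float(i), float(i) + 1.0])
    def y_gen():
        for i in range(4):
            yield np.array([i % 2])
    feeder = StreamingDataFeeder(x_gen(), y_gen(), n_classes=2, batch_size=2)
    return feeder.get_feed_params()  # {'batch_size': 2}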
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Numpy arrays can be serialized to disk, and it is possible to do random seeks
into them. DaskDataFeeder removes the requirement to hold the full dataset in
memory while still allowing random seeks for sampling batches.
"""
@deprecated(None, 'Please feed input to tf.data to support dask.')
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
x: iterator that, for each element, returns features.
y: iterator that, for each element, returns one or many class labels /
regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate, so use an int
value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.placeholder for input features mini batch.
output_placeholder: tf.placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
| apache-2.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/numpydoc/docscrape_sphinx.py | 8 | 9527 | from __future__ import division, absolute_import, print_function
import sys
import re
import inspect
import textwrap
import pydoc
import sphinx
import collections
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self, name='Returns'):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:']
out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', '', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns('Returns')
out += self._str_returns('Yields')
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
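# Illustrative sketch (hypothetical, not part of the original module): get_doc_object
# dispatches on the object's kind; for an ordinary callable it returns a
# SphinxFunctionDoc whose str() is the reST rendering. textwrap.dedent is used only as
# an arbitrary documented callable available from the imports above.
def _sketch_get_doc_object():
    doc = get_doc_object(textwrap.dedent)
    assert isinstance(doc, SphinxFunctionDoc)
    return str(doc)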
| gpl-3.0 |
qPCR4vir/orange3 | Orange/distance/__init__.py | 3 | 8291 | import numpy as np
from scipy import stats
import sklearn.metrics as skl_metrics
from Orange import data
from Orange.misc import DistMatrix
from Orange.preprocess import SklImpute
__all__ = ['Euclidean', 'Manhattan', 'Cosine', 'Jaccard', 'SpearmanR', 'SpearmanRAbsolute',
'PearsonR', 'PearsonRAbsolute', 'Mahalanobis', 'MahalanobisDistance']
def _preprocess(table):
"""Remove categorical attributes and impute missing values."""
if not len(table):
return table
new_domain = data.Domain([a for a in table.domain.attributes if a.is_continuous],
table.domain.class_vars,
table.domain.metas)
new_data = data.Table(new_domain, table)
new_data = SklImpute(new_data)
return new_data
def _orange_to_numpy(x):
"""Convert :class:`Orange.data.Table` and :class:`Orange.data.RowInstance` to :class:`numpy.ndarray`."""
if isinstance(x, data.Table):
return x.X
elif isinstance(x, data.Instance):
return np.atleast_2d(x.x)
elif isinstance(x, np.ndarray):
return np.atleast_2d(x)
else:
return x # e.g. None
class Distance:
def __call__(self, e1, e2=None, axis=1, impute=False):
"""
:param e1: input data instances, we calculate distances between all pairs
:type e1: :class:`Orange.data.Table` or :class:`Orange.data.RowInstance` or :class:`numpy.ndarray`
:param e2: optional second argument for data instances
if provided, distances between each pair, where first item is from e1 and second is from e2, are calculated
:type e2: :class:`Orange.data.Table` or :class:`Orange.data.RowInstance` or :class:`numpy.ndarray`
:param axis: if axis=1 we calculate distances between rows,
if axis=0 we calculate distances between columns
:type axis: int
:param impute: if impute=True all NaN values in matrix are replaced with 0
:type impute: bool
:return: the matrix with distances between given examples
:rtype: :class:`Orange.misc.distmatrix.DistMatrix`
"""
raise NotImplementedError('Distance is an abstract class and should not be used directly.')
class SklDistance(Distance):
"""Generic scikit-learn distance."""
def __init__(self, metric, name, supports_sparse):
"""
Args:
metric: The metric to be used for distance calculation
name (str): Name of the distance
supports_sparse (boolean): Whether this metric works on sparse data or not.
"""
self.metric = metric
self.name = name
self.supports_sparse = supports_sparse
def __call__(self, e1, e2=None, axis=1, impute=False):
x1 = _orange_to_numpy(e1)
x2 = _orange_to_numpy(e2)
if axis == 0:
x1 = x1.T
if x2 is not None:
x2 = x2.T
dist = skl_metrics.pairwise.pairwise_distances(x1, x2, metric=self.metric)
if isinstance(e1, data.Table) or isinstance(e1, data.RowInstance):
dist = DistMatrix(dist, e1, e2, axis)
else:
dist = DistMatrix(dist)
return dist
Euclidean = SklDistance('euclidean', 'Euclidean', True)
Manhattan = SklDistance('manhattan', 'Manhattan', True)
Cosine = SklDistance('cosine', 'Cosine', True)
Jaccard = SklDistance('jaccard', 'Jaccard', False)
class SpearmanDistance(Distance):
""" Generic Spearman's rank correlation coefficient. """
def __init__(self, absolute, name):
"""
Constructor for Spearman's and Absolute Spearman's distances.
Args:
absolute (boolean): Whether to use absolute values or not.
name (str): Name of the distance
Returns:
If absolute=True return Spearman's Absolute rank class else return Spearman's rank class.
"""
self.absolute = absolute
self.name = name
self.supports_sparse = False
def __call__(self, e1, e2=None, axis=1, impute=False):
x1 = _orange_to_numpy(e1)
x2 = _orange_to_numpy(e2)
if x2 is None:
x2 = x1
slc = len(x1) if axis == 1 else x1.shape[1]
rho, _ = stats.spearmanr(x1, x2, axis=axis)
if np.isnan(rho).any() and impute:
rho = np.nan_to_num(rho)
if self.absolute:
dist = (1. - np.abs(rho)) / 2.
else:
dist = (1. - rho) / 2.
if isinstance(dist, np.float):
dist = np.array([[dist]])
elif isinstance(dist, np.ndarray):
dist = dist[:slc, slc:]
if isinstance(e1, data.Table) or isinstance(e1, data.RowInstance):
dist = DistMatrix(dist, e1, e2, axis)
else:
dist = DistMatrix(dist)
return dist
SpearmanR = SpearmanDistance(absolute=False, name='Spearman')
SpearmanRAbsolute = SpearmanDistance(absolute=True, name='Spearman absolute')
class PearsonDistance(Distance):
""" Generic Pearson's rank correlation coefficient. """
def __init__(self, absolute, name):
"""
Constructor for Pearson's and Absolute Pearson's distances.
Args:
absolute (boolean): Whether to use absolute values or not.
name (str): Name of the distance
Returns:
If absolute=True return Pearson's Absolute rank class else return Pearson's rank class.
"""
self.absolute = absolute
self.name = name
self.supports_sparse = False
def __call__(self, e1, e2=None, axis=1, impute=False):
x1 = _orange_to_numpy(e1)
x2 = _orange_to_numpy(e2)
if x2 is None:
x2 = x1
if axis == 0:
x1 = x1.T
x2 = x2.T
rho = np.array([[stats.pearsonr(i, j)[0] for j in x2] for i in x1])
if np.isnan(rho).any() and impute:
rho = np.nan_to_num(rho)
if self.absolute:
dist = (1. - np.abs(rho)) / 2.
else:
dist = (1. - rho) / 2.
if isinstance(e1, data.Table) or isinstance(e1, data.RowInstance):
dist = DistMatrix(dist, e1, e2, axis)
else:
dist = DistMatrix(dist)
return dist
PearsonR = PearsonDistance(absolute=False, name='Pearson')
PearsonRAbsolute = PearsonDistance(absolute=True, name='Pearson absolute')
class MahalanobisDistance(Distance):
"""Mahalanobis distance."""
def __init__(self, data=None, axis=1, name='Mahalanobis'):
self.name = name
self.supports_sparse = False
self.axis = None
self.VI = None
if data is not None:
self.fit(data, axis)
def fit(self, data, axis=1):
"""
Compute the covariance matrix needed for calculating distances.
Args:
data: The dataset used for calculating covariances.
axis: If axis=1 we calculate distances between rows, if axis=0 we calculate distances between columns.
"""
x = _orange_to_numpy(data)
if axis == 0:
x = x.T
n, m = x.shape
if n <= m:
raise ValueError('Too few observations for the number of dimensions.')
self.axis = axis
self.VI = np.linalg.inv(np.cov(x.T))
def __call__(self, e1, e2=None, axis=None, impute=False):
assert self.VI is not None, "Mahalanobis distance must be initialized with the fit() method."
x1 = _orange_to_numpy(e1)
x2 = _orange_to_numpy(e2)
if axis is not None:
assert axis == self.axis, "Axis must match its value at initialization."
if self.axis == 0:
x1 = x1.T
if x2 is not None:
x2 = x2.T
if x1.shape[1] != self.VI.shape[0] or x2 is not None and x2.shape[1] != self.VI.shape[0]:
raise ValueError('Incorrect number of features.')
dist = skl_metrics.pairwise.pairwise_distances(x1, x2, metric='mahalanobis', VI=self.VI)
if np.isnan(dist).any() and impute:
dist = np.nan_to_num(dist)
if isinstance(e1, data.Table) or isinstance(e1, data.RowInstance):
dist = DistMatrix(dist, e1, e2, self.axis)
else:
dist = DistMatrix(dist)
return dist
Mahalanobis = MahalanobisDistance()
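# Illustrative sketch (hypothetical, not part of the original module): the distance
# callables above also accept plain numpy arrays and return a DistMatrix of pairwise
# row distances. The helper name is made up for illustration.
def _sketch_euclidean_on_ndarray():
    points = np.array([[0.0, 0.0], [3.0, 4.0], [6.0, 8.0]])
    dist = Euclidean(points)  # 3 x 3 DistMatrix; dist[0, 1] is 5.0
    return dist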
| bsd-2-clause |
clemkoa/scikit-learn | sklearn/datasets/__init__.py | 61 | 3734 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_breast_cancer
from .base import load_boston
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_sample_images
from .base import load_sample_image
from .base import load_wine
from .base import get_data_home
from .base import clear_data_home
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'load_wine',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
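# Illustrative sketch (hypothetical, not part of the original module): loading one of
# the bundled toy datasets re-exported above. The helper name is made up for
# illustration.
def _sketch_load_iris():
    iris = load_iris()  # Bunch with .data, .target, .feature_names, ...
    return iris.data.shape, iris.target.shape  # ((150, 4), (150,))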
| bsd-3-clause |
PlayUAV/MissionPlanner | Lib/site-packages/numpy/lib/polynomial.py | 58 | 35930 | """
Functions to operate on polynomials.
"""
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import isscalar, abs, finfo, atleast_1d, hstack
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag
from numpy.linalg import eigvals, lstsq
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
pass
else:
raise ValueError, "input must be 1d or square 2d array."
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0,roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like of shape(M,)
        Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError:
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with
a given sequence of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] Wikipedia, "Companion matrix",
http://en.wikipedia.org/wiki/Companion_matrix
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError,"Input must be a rank-1 array."
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : {array_like, poly1d}
        Polynomial to integrate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : {None, list of `m` scalars, scalar}, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError, "Order of integral must be positive (see polyder)"
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError, \
"k must be a scalar or a rank-1 array of length 1 or >m."
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError, "Order of derivative must be positive (see polyint)"
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than this
relative to the largest singular value will be ignored. The default
value is len(x)*eps, where eps is the relative precision of the float
type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is
False (the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is also
returned.
Returns
-------
    p : ndarray, shape (deg + 1,) or (deg + 1, K)
Polynomial coefficients, highest power first.
If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond : present only if `full` = True
Residuals of the least-squares fit, the effective rank of the scaled
Vandermonde coefficient matrix, its singular values, and the specified
value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
...
x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError, "expected deg >= 0"
if x.ndim != 1:
raise TypeError, "expected 1D vector for x"
if x.size == 0:
raise TypeError, "expected non-empty vector for x"
if y.ndim < 1 or y.ndim > 2 :
raise TypeError, "expected 1D or 2D array for y"
if x.shape[0] != y.shape[0] :
raise TypeError, "expected x and y to have same length"
# set rcond
if rcond is None :
rcond = len(x)*finfo(x.dtype).eps
# scale x to improve condition number
scale = abs(x).max()
if scale != 0 :
x /= scale
# solve least squares equation for powers of x
v = vander(x, order)
c, resids, rank, s = lstsq(v, y, rcond)
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
# scale returned coefficients
if scale != 0 :
if c.ndim == 1 :
c /= vander([scale], order)[0]
else :
c /= vander([scale], order).T
if full :
return c, resids, rank, s, rcond
else :
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = x * y + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1,a2 = poly1d(a1),poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
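    """Format the flat string form of a polynomial so that exponents written
    as '**n' are rendered as superscript digits on the line above their terms,
    wrapping long polynomials at roughly `wrap` characters. Used by
    poly1d.__str__.
    """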
n = 0
line1 = ''
line2 = ''
output = ' '
while 1:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2)+len(toadd2) > wrap) or \
(len(line1)+len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(k+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError, "Polynomial must be 1d only."
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError, "Power to non-negative integers only."
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
return NX.alltrue(self.coeffs == other.coeffs)
def __ne__(self, other):
return NX.any(self.coeffs != other.coeffs)
def __setattr__(self, key, val):
raise ValueError, "Attributes cannot be changed this way."
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c','coef','coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError, "Does not support negative powers."
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always',RankWarning)
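# ----------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of the original
# module): a quick self-check of poly1d, poly and roots. Running the file
# directly executes it; importing the module is unaffected.
if __name__ == '__main__':
    p = poly1d([1, -3, 2])          # x**2 - 3 x + 2 == (x - 1)(x - 2)
    assert p(1) == 0 and p(2) == 0
    assert NX.allclose(poly([1, 2]), p.coeffs)
    print(sorted(roots(p.coeffs)))  # -> [1.0, 2.0]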
| gpl-3.0 |
magenta/ddsp | ddsp/training/summaries.py | 1 | 16561 | # Copyright 2021 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Library of tensorboard summary functions relevant to DDSP training."""
import io
import ddsp
from ddsp.core import tf_float32
from ddsp.training.plotting import pianoroll_plot_setup
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import note_seq
from note_seq import sequences_lib
import numpy as np
import tensorflow.compat.v2 as tf
def fig_summary(tag, fig, step):
"""Writes an image summary from a string buffer of an mpl figure.
  This writer writes an image summary in V1 proto format using the V2 API.
Args:
tag: An arbitrary tag name for this summary.
fig: A matplotlib figure.
step: The `int64` monotonic step variable, which defaults
to `tf.compat.v1.train.get_global_step`.
"""
buffer = io.BytesIO()
fig.savefig(buffer, format='png')
image_summary = tf.compat.v1.Summary.Image(
encoded_image_string=buffer.getvalue())
plt.close(fig)
pb = tf.compat.v1.Summary()
pb.value.add(tag=tag, image=image_summary)
serialized = tf.convert_to_tensor(pb.SerializeToString())
tf.summary.experimental.write_raw_pb(serialized, step=step, name=tag)
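# Illustrative sketch (editorial addition, not part of the original DDSP
# module): fig_summary is meant to be called inside an active summary-writer
# context, e.g. during evaluation. The log directory below is a placeholder.
def _example_fig_summary(logdir='/tmp/ddsp_summaries', step=0):
  fig, ax = plt.subplots(1, 1, figsize=(2.5, 2.5))
  ax.plot(np.sin(np.linspace(0.0, 6.28, 100)))
  with tf.summary.create_file_writer(logdir).as_default():
    fig_summary('example/sine', fig, step)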
def waveform_summary(audio, audio_gen, step, name=''):
"""Creates a waveform plot summary for a batch of audio."""
  def plot_waveform(i, length=None, prefix='waveform', name=''):
    """Plots a waveform."""
waveform = np.squeeze(audio[i])
waveform = waveform[:length] if length is not None else waveform
waveform_gen = np.squeeze(audio_gen[i])
waveform_gen = waveform_gen[:length] if length is not None else waveform_gen
# Manually specify exact size of fig for tensorboard
fig, (ax0, ax1) = plt.subplots(2, 1, figsize=(2.5, 2.5))
ax0.plot(waveform)
ax1.plot(waveform_gen)
# Format and save plot to image
name = name + '_' if name else ''
tag = f'waveform/{name}{prefix}_{i+1}'
fig_summary(tag, fig, step)
# Make plots at multiple lengths.
batch_size = int(audio.shape[0])
for i in range(batch_size):
plot_waveform(i, length=None, prefix='full', name=name)
plot_waveform(i, length=2000, prefix='125ms', name=name)
def get_spectrogram(audio, rotate=False, size=1024):
"""Compute logmag spectrogram."""
mag = ddsp.spectral_ops.compute_logmag(tf_float32(audio), size=size)
if rotate:
mag = np.rot90(mag)
return mag
def _plt_spec(spec, ax, title):
"""Helper function to plot a spectrogram to an axis."""
spec = np.rot90(spec)
ax.matshow(spec, vmin=-5, vmax=1, aspect='auto', cmap=plt.cm.magma)
ax.set_title(title)
ax.set_xticks([])
ax.set_yticks([])
def spectrogram_summary(audio, audio_gen, step, name='', tag='spectrogram'):
"""Writes a summary of spectrograms for a batch of images."""
specgram = lambda a: ddsp.spectral_ops.compute_logmag(tf_float32(a), size=768)
# Batch spectrogram operations
spectrograms = specgram(audio)
spectrograms_gen = specgram(audio_gen)
batch_size = int(audio.shape[0])
name = name + '_' if name else ''
for i in range(batch_size):
# Manually specify exact size of fig for tensorboard
fig, axs = plt.subplots(2, 1, figsize=(8, 8))
_plt_spec(spectrograms[i], axs[0], 'original')
_plt_spec(spectrograms_gen[i], axs[1], 'synthesized')
# Format and save plot to image
tag_i = f'{tag}/{name}{i+1}'
fig_summary(tag_i, fig, step)
def audio_summary(audio, step, sample_rate=16000, name='audio'):
  """Writes an audio summary for a batch of audio."""
# Ensure there is a single channel dimension.
batch_size = int(audio.shape[0])
if len(audio.shape) == 2:
audio = audio[:, :, tf.newaxis]
tf.summary.audio(
name, audio, sample_rate, step, max_outputs=batch_size, encoding='wav')
def f0_summary(f0_hz, f0_hz_predict, step, name='f0_midi', tag='f0_midi'):
"""Creates a plot comparison of ground truth f0_hz and predicted values."""
batch_size = int(f0_hz.shape[0])
# Resample predictions to match ground truth if they don't already.
if f0_hz.shape[1] != f0_hz_predict.shape[1]:
f0_hz_predict = ddsp.core.resample(f0_hz_predict, f0_hz.shape[1])
for i in range(batch_size):
f0_midi = ddsp.core.hz_to_midi(tf.squeeze(f0_hz[i]))
f0_midi_predict = ddsp.core.hz_to_midi(tf.squeeze(f0_hz_predict[i]))
# Manually specify exact size of fig for tensorboard
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(6.0, 2.0))
ax0.plot(f0_midi)
ax0.plot(f0_midi_predict)
ax0.set_title('original vs. predicted')
ax1.plot(f0_midi_predict)
ax1.set_title('predicted')
# Format and save plot to image
tag = f'{tag}/{name}_{i + 1}'
fig_summary(tag, fig, step)
def midi_summary(controls, step, name, frame_rate, notes_key):
"""Plots segmented midi with controls."""
batch_size = controls['f0_hz'].shape[0]
for i in range(batch_size):
amps = controls['harmonic']['controls']['amplitudes'][i]
f0_hz = ddsp.core.hz_to_midi(controls['f0_hz'][i])
fig, ax = plt.subplots(2, 1, figsize=(6.0, 4.0))
ax[0].semilogy(amps, label='controls')
ax[0].set_title('Amps')
ax[1].plot(f0_hz, label='controls')
ax[1].set_title('f0')
notes_f0 = np.zeros_like(f0_hz)
notes_amps = np.zeros_like(amps)
markers = []
note_sequence = controls[notes_key][i]
for note in note_sequence.notes:
start_time = int(note.start_time * frame_rate)
end_time = int(note.end_time * frame_rate)
notes_f0[start_time:end_time] = note.pitch
notes_amps[start_time:end_time] = note.velocity / 1e3
markers.append(start_time)
ax[0].plot(notes_amps, '-d', label='notes', markevery=markers)
ax[0].legend()
ax[1].plot(notes_f0, '-d', label='notes', markevery=markers)
ax[1].legend()
fig_summary(f'midi/{name}_{i + 1}', fig, step)
def _get_reasonable_f0_min_max(f0_midi,
max_spike=5.0,
min_midi_value=5.0,
pad=6.0):
"""Find the min and max for an f0 plot, ignoring spike glitches and low notes.
This function finds the min and max of the f0 after two operations to omit
values. The first does np.diff() to examine the difference between adjacent
values in the f0 curve. Values in abs(diff) above `max_spikes` are omitted.
The second operation excludes MIDI notes below a threshold as determined by
`min_midi_values`. After those two operations the min and max are found, and
a padding (`pad`) value is added to the max and subtracted from the min
before returning.
Args:
f0_midi: f0 curve in MIDI space.
max_spike: Max value between successive diff values that will be included
in the final min/max calculation.
min_midi_value: Any MIDI values below this number will not be included in
      the final min/max calculation.
pad: Value that will be added to the max and subtracted from the min.
Returns:
    min_, max_: Values for an f0 plot that emphasizes the parts we care about.
"""
# Mask out the 'spikes' by thresholding the diff above a value.
diff = np.diff(f0_midi)
diff = np.insert(diff, 0, 0.0)
diff_mask = np.ma.masked_outside(diff, -max_spike, max_spike)
# Remove any notes below the min.
f0_mask = np.ma.masked_less(f0_midi, min_midi_value)
# Combine the two masked arrays
comb_masks = np.ma.array(
f0_midi,
mask=np.logical_or(diff_mask.mask, f0_mask.mask)
)
  # Compute min/max with the padding and return.
min_ = np.floor(np.min(comb_masks) - pad)
max_ = np.ceil(np.max(comb_masks) + pad)
return min_, max_
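# Illustrative sketch (editorial addition, not part of the original DDSP
# module): the helper above on a synthetic f0 curve with a one-frame spike
# and a short unvoiced (near-zero MIDI) region.
def _example_f0_min_max():
  f0 = np.full(100, 60.0)   # steady pitch around MIDI 60
  f0[50] = 120.0            # single-frame spike glitch, masked via np.diff
  f0[:10] = 0.0             # unvoiced frames, masked via the MIDI threshold
  return _get_reasonable_f0_min_max(f0)  # -> (54.0, 66.0) with the default pad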
def _midiae_f0_helper(q_pitch, f0_midi, curve, i, step, label, tag):
"""Helper function to plot F0 info with MIDI AE."""
min_, max_ = _get_reasonable_f0_min_max(f0_midi)
plt.close('all')
fig, ax, sp = pianoroll_plot_setup(figsize=(6.0, 4.0))
sp.set_ylabel('MIDI Note Value')
ax.step(q_pitch, 'r', linewidth=1.0, label='q_pitch')
ax.plot(f0_midi, 'dodgerblue', linewidth=1.5, label='input f0')
ax.plot(curve, 'darkgreen', linewidth=1.25, label=label)
ax.set_ylim(min_, max_)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.legend()
fig_summary(f'{tag}/ex_{i + 1}', fig, step)
def midiae_f0_summary(f0_hz, outputs, step):
"""Makes plots to inspect f0/pitch components of MidiAE.
Args:
f0_hz: The input f0 to the network.
outputs: Output dictionary from the MidiAe net.
step: The step that the optimizer is currently on.
"""
batch_size = int(f0_hz.shape[0])
for i in range(batch_size):
f0_midi = ddsp.core.hz_to_midi(tf.squeeze(f0_hz[i]))
q_pitch = np.squeeze(outputs['q_pitch'][i])
f0_rec = np.squeeze(outputs['f0_midi_pred'][i])
_midiae_f0_helper(q_pitch, f0_midi, f0_rec, i, step, 'rec_f0',
'midiae_decoder_pitch')
if 'f0_midi_rec2' in outputs:
f0_rec2 = np.squeeze(outputs['f0_midi_pred2'][i])
_midiae_f0_helper(q_pitch, f0_midi, f0_rec2, i, step, 'rec_f0_2',
'midiae_decoder_pitch2')
if 'pitch' in outputs:
raw_pitch = np.squeeze(outputs['pitch'][i])
_midiae_f0_helper(q_pitch, f0_midi, raw_pitch, i, step, 'z_pitch',
'midiae_encoder_pitch')
def _midiae_ld_helper(ld_input, ld_rec, curve, db_key, i, step, label, tag):
"""Helper function to plot loudness info with MIDI AE."""
fig = plt.figure(figsize=(6.0, 4.0))
plt.plot(ld_input, linewidth=1.5, label='input ld')
plt.plot(ld_rec, 'g', linewidth=1.25, label='rec ld')
plt.step(curve, 'r', linewidth=0.75, label=label)
plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True))
plt.legend()
fig_summary(f'{tag}/{db_key}_{i + 1}', fig, step)
def midiae_ld_summary(ld_feat, outputs, step, db_key='loudness_db'):
"""Makes plots to inspect loudness/velocity components of MidiAE.
Args:
ld_feat: The input loudness feature to the network.
outputs: Output dictionary from the MidiAe net.
    step: The step that the optimizer is currently on.
db_key: Name of the loudness key (power_db or loudness_db).
"""
batch_size = int(ld_feat.shape[0])
for i in range(batch_size):
ld_input = np.squeeze(ld_feat[i])
ld_rec = np.squeeze(outputs[f'{db_key}_rec'][i])
vel_quant = np.squeeze(outputs['velocity_quant'][i])
_midiae_ld_helper(ld_input, ld_rec, vel_quant, db_key, i, step,
'q_vel', 'midiae_decoder_ld')
if f'{db_key}_rec2' in outputs:
ld_rec2 = np.squeeze(outputs[f'{db_key}_rec2'][i])
_midiae_ld_helper(ld_input, ld_rec2, vel_quant, db_key, i, step,
'q_vel', 'midiae_decoder_ld2')
if 'velocity' in outputs:
vel = np.squeeze(outputs['velocity'][i])
_midiae_ld_helper(ld_input, ld_rec, vel, db_key, i, step,
'vel', 'midiae_encoder_ld')
def midiae_sp_summary(outputs, step):
"""Synth Params summaries."""
batch_size = int(outputs['f0_hz'].shape[0])
have_pred = 'amps_pred' in outputs
height = 12 if have_pred else 4
rows = 3 if have_pred else 1
for i in range(batch_size):
# Amplitudes ----------------------------
amps = np.squeeze(outputs['amps'][i])
fig, ax = plt.subplots(nrows=rows, ncols=1, figsize=(8, height))
ax[0].plot(amps)
ax[0].set_title('Amplitudes - synth_params')
if have_pred:
amps_pred = np.squeeze(outputs['amps_pred'][i])
ax[1].plot(amps_pred)
ax[1].set_title('Amplitudes - pred')
amps_diff = amps - amps_pred
ax[2].plot(amps_diff)
ax[2].set_title('Amplitudes - diff')
for ax in fig.axes:
ax.label_outer()
fig_summary(f'amplitudes/amplitudes_{i + 1}', fig, step)
# Harmonic Distribution ------------------
hd = np.log(np.squeeze(outputs['hd'][i]) + 1e-8)
fig, ax = plt.subplots(nrows=rows, ncols=1, figsize=(8, height))
im = ax[0].imshow(hd.T, aspect='auto', origin='lower')
fig.colorbar(im, ax=ax[0])
ax[0].set_title('Harmonic Distribution (log) - synth_params')
if have_pred:
hd_pred = np.log(np.squeeze(outputs['hd_pred'][i]) + 1e-8)
im = ax[1].imshow(hd_pred.T, aspect='auto', origin='lower')
fig.colorbar(im, ax=ax[1])
ax[1].set_title('Harmonic Distribution (log) - pred')
hd_diff = hd - hd_pred
im = ax[2].imshow(hd_diff.T, aspect='auto', origin='lower')
fig.colorbar(im, ax=ax[2])
ax[2].set_title('Harmonic Distribution (log) - diff')
for ax in fig.axes:
ax.label_outer()
fig_summary(f'harmonic_dist/harmonic_dist_{i + 1}', fig, step)
    # Noise Magnitudes ----------------------
noise = np.squeeze(outputs['noise'][i])
fig, ax = plt.subplots(nrows=rows, ncols=1, figsize=(8, height))
im = ax[0].imshow(noise.T, aspect='auto', origin='lower')
fig.colorbar(im, ax=ax[0])
ax[0].set_title('Noise mags - synth_params')
if have_pred:
noise_pred = np.squeeze(outputs['noise_pred'][i])
im = ax[1].imshow(noise_pred.T, aspect='auto', origin='lower')
fig.colorbar(im, ax=ax[1])
ax[1].set_title('Noise mags - pred')
noise_diff = noise - noise_pred
im = ax[2].imshow(noise_diff.T, aspect='auto', origin='lower')
fig.colorbar(im, ax=ax[2])
ax[2].set_title('Noise mags - diff')
for ax in fig.axes:
ax.label_outer()
fig_summary(f'noise_mags/noise_mags_{i + 1}', fig, step)
def pianoroll_summary(batch, step, name, frame_rate, pred_key,
gt_key='note_active_velocities', ch=None,
threshold=0.0):
"""Plots ground truth pianoroll against predicted MIDI."""
batch_size = batch[gt_key].shape[0]
for i in range(batch_size):
if ch is None:
gt_pianoroll = batch[gt_key][i]
pred_pianoroll = batch[pred_key][i]
else:
gt_pianoroll = batch[gt_key][i, ..., ch]
pred_pianoroll = batch[pred_key][i, ..., ch]
if isinstance(pred_pianoroll, note_seq.NoteSequence):
pred_pianoroll = sequences_lib.sequence_to_pianoroll(
pred_pianoroll,
frames_per_second=frame_rate,
min_pitch=note_seq.MIN_MIDI_PITCH,
max_pitch=note_seq.MAX_MIDI_PITCH).active[:-1, :]
img = np.zeros((gt_pianoroll.shape[1], gt_pianoroll.shape[0], 4))
# All values in `rgb` should be 0.0 except the value at index `idx`
gt_color = {'idx': 1, 'rgb': np.array([0.0, 1.0, 0.0])} # green
pred_color = {'idx': 2, 'rgb': np.array([0.0, 0.0, 1.0])} # blue
gt_pianoroll_t = np.transpose(gt_pianoroll)
pred_pianoroll_t = np.transpose(pred_pianoroll)
img[:, :, gt_color['idx']] = gt_pianoroll_t
img[:, :, pred_color['idx']] = pred_pianoroll_t
# this is the alpha channel:
img[:, :, 3] = np.logical_or(gt_pianoroll_t > threshold,
pred_pianoroll_t > threshold)
# Determine the min & max y-values for plotting.
gt_note_indices = np.argmax(gt_pianoroll, axis=1)
pred_note_indices = np.argmax(pred_pianoroll, axis=1)
all_note_indices = np.concatenate([gt_note_indices, pred_note_indices])
if np.sum(np.nonzero(all_note_indices)) > 0:
lower_limit = np.min(all_note_indices[np.nonzero(all_note_indices)])
upper_limit = np.max(all_note_indices)
else:
lower_limit = 0
upper_limit = 127
# Make the figures and add them to the summary.
fig, ax, _ = pianoroll_plot_setup(figsize=(6.0, 4.0),
xlim=[0, img.shape[1]])
ax.imshow(img, origin='lower', aspect='auto', interpolation='nearest')
ax.set_ylim((max(lower_limit - 5, 0), min(upper_limit + 5, 127)))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
labels_and_colors = [
('GT MIDI', gt_color['rgb']), # green
('Pred MIDI', pred_color['rgb']), # blue
('Overlap', gt_color['rgb'] + pred_color['rgb']) # cyan
]
patches = [mpatches.Patch(label=l, color=c) for l, c in labels_and_colors]
fig.legend(handles=patches)
fig_summary(f'pianoroll/{name}_{i + 1}', fig, step)
| apache-2.0 |
RPGOne/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
igsr/igsr_analysis | p3/p3BAMQC.py | 1 | 1522 | '''
Created on 27 Jan 2017
@author: ernesto
'''
import pandas as pd
class p3BAMQC:
"""
Class representing a spreadsheet located at ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/working/20130606_sample_info/
20130606_sample_info.xlsx containing information on the BAM QC done for the p3
"""
def __init__(self, filepath):
'''
Constructor
Parameters
----------
filepath: str
Path to the spreadsheet.
book: ExcelFile object
'''
self.filepath = filepath
# Import the excel file and call it xls_file
xls_file = pd.ExcelFile(filepath)
self.book = xls_file
def get_final_qc_results(self, group):
"""
Method to get the sheet corresponding to 'Final QC Results'
Parameters
----------
group: {'low coverage', 'exome'}
Get the data for the low coverage or exome worksheet.
Returns
-------
df : data.frame object
"""
sheet = self.book.parse('Final QC Results', skiprows=1, index_col=[0, 1])
new_column_names = ['VerifyBam_Omni_Free', 'VerifyBam_Affy_Free', 'VerifyBam_Omni_Chip',
'VerifyBam_Affy_Chip', 'Indel_Ratio', 'Passed_QC']
        if group == "low coverage":
            df = sheet.iloc[:, 6:12]
        elif group == "exome":
            df = sheet.iloc[:, 0:6]
        else:
            raise ValueError("group must be 'low coverage' or 'exome'")
df.columns = new_column_names
return df
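if __name__ == '__main__':
    # Illustrative usage sketch (editorial addition, not part of the original
    # module). Pass the path to the 20130606_sample_info.xlsx spreadsheet as
    # the first command-line argument; the spreadsheet is not bundled here.
    import sys
    qc = p3BAMQC(sys.argv[1])
    print(qc.get_final_qc_results(group="low coverage").head())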
| apache-2.0 |
wangjohn/wallace | wallace/predictive_models/gradient_boosting_regression.py | 1 | 1550 | from sklearn import ensemble
from wallace.predictive_models.sklearn_model import SklearnModel, TrainedSklearnModel
from wallace.parameters import ParametersGeneralValidityCheck
class GradientBoostingRegression(SklearnModel):
def train(self, dataset):
model = ensemble.GradientBoostingRegressor(learning_rate=self.get_learning_rate(), \
n_estimators=self.get_number_estimators(), \
max_depth=self.get_max_depth()
)
independent_data = self.get_independent_variable_data(dataset)
dependent_data = self.get_dependent_variable_data(dataset)
trained_regression = model.fit(independent_data, dependent_data)
return TrainedSklearnModel(self, trained_regression)
@classmethod
def validity_check(klass):
validity_check = ParametersGeneralValidityCheck()
validity_check.set_range_parameter("gradient_boosting_regression.learning_rate", 0.0, 1.0)
validity_check.set_integer_range_parameter("gradient_boosting_regression.number_estimators", 1, 1000)
validity_check.set_integer_range_parameter("gradient_boosting_regression.max_depth", 1, 100)
return validity_check
def get_number_estimators(self):
return self.parameter_set.get("gradient_boosting_regression.number_estimators")
def get_learning_rate(self):
return self.parameter_set.get("gradient_boosting_regression.learning_rate")
def get_max_depth(self):
return self.parameter_set.get("gradient_boosting_regression.max_depth")
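# Illustrative sketch (editorial addition, not part of wallace): the wrapper
# above ultimately delegates to scikit-learn's GradientBoostingRegressor. A
# bare-bones equivalent fit, outside the ParameterSet machinery, looks like
# this; the hyperparameter values are arbitrary examples.
if __name__ == '__main__':
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(200, 3)
    y = X.dot([1.0, -2.0, 0.5]) + 0.1 * rng.rand(200)
    sketch = ensemble.GradientBoostingRegressor(
        learning_rate=0.1, n_estimators=100, max_depth=3).fit(X, y)
    print(sketch.predict(X[:5]))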
| mit |
blaze/partd | partd/numpy.py | 2 | 4213 | """ Store arrays
We put arrays on disk as raw bytes, extending along the first dimension.
Alongside each array x we ensure the value x.dtype which stores the string
description of the array's dtype.
"""
from __future__ import absolute_import
import numpy as np
from toolz import valmap, identity, partial
from .compatibility import pickle
from .core import Interface
from .file import File
from .utils import frame, framesplit, suffix, ignoring
def serialize_dtype(dt):
""" Serialize dtype to bytes
>>> serialize_dtype(np.dtype('i4'))
'<i4'
>>> serialize_dtype(np.dtype('M8[us]'))
'<M8[us]'
"""
return dt.str.encode()
def parse_dtype(s):
""" Parse text as numpy dtype
>>> parse_dtype('i4')
dtype('int32')
>>> parse_dtype("[('a', 'i4')]")
dtype([('a', '<i4')])
"""
if s.startswith(b'['):
return np.dtype(eval(s)) # Dangerous!
else:
return np.dtype(s)
class Numpy(Interface):
def __init__(self, partd=None):
if not partd or isinstance(partd, str):
partd = File(partd)
self.partd = partd
Interface.__init__(self)
def __getstate__(self):
return {'partd': self.partd}
def append(self, data, **kwargs):
for k, v in data.items():
self.partd.iset(suffix(k, '.dtype'), serialize_dtype(v.dtype))
self.partd.append(valmap(serialize, data), **kwargs)
def _get(self, keys, **kwargs):
bytes = self.partd._get(keys, **kwargs)
dtypes = self.partd._get([suffix(key, '.dtype') for key in keys],
lock=False)
dtypes = map(parse_dtype, dtypes)
return list(map(deserialize, bytes, dtypes))
def delete(self, keys, **kwargs):
keys2 = [suffix(key, '.dtype') for key in keys]
self.partd.delete(keys2, **kwargs)
def _iset(self, key, value):
return self.partd._iset(key, value)
def drop(self):
return self.partd.drop()
def __del__(self):
self.partd.__del__()
@property
def lock(self):
return self.partd.lock
def __exit__(self, *args):
self.drop()
self.partd.__exit__(self, *args)
try:
from pandas import msgpack
except ImportError:
try:
import msgpack
except ImportError:
msgpack = False
def serialize(x):
if x.dtype == 'O':
l = x.flatten().tolist()
with ignoring(Exception): # Try msgpack (faster on strings)
return frame(msgpack.packb(l, use_bin_type=True))
return frame(pickle.dumps(l, protocol=pickle.HIGHEST_PROTOCOL))
else:
return x.tobytes()
def deserialize(bytes, dtype, copy=False):
if dtype == 'O':
try:
if msgpack.version >= (0, 5, 2):
unpack_kwargs = {'raw': False}
else:
unpack_kwargs = {'encoding': 'utf-8'}
blocks = [msgpack.unpackb(f, **unpack_kwargs)
for f in framesplit(bytes)]
except Exception:
blocks = [pickle.loads(f) for f in framesplit(bytes)]
result = np.empty(sum(map(len, blocks)), dtype='O')
i = 0
for block in blocks:
result[i:i + len(block)] = block
i += len(block)
return result
else:
result = np.frombuffer(bytes, dtype)
if copy:
result = result.copy()
return result
compress_text = identity
decompress_text = identity
compress_bytes = lambda bytes, itemsize: bytes
decompress_bytes = identity
with ignoring(ImportError):
import blosc
blosc.set_nthreads(1)
compress_bytes = blosc.compress
decompress_bytes = blosc.decompress
compress_text = partial(blosc.compress, typesize=1)
decompress_text = blosc.decompress
with ignoring(ImportError):
from snappy import compress as compress_text
from snappy import decompress as decompress_text
def compress(bytes, dtype):
if dtype == 'O':
return compress_text(bytes)
else:
return compress_bytes(bytes, dtype.itemsize)
def decompress(bytes, dtype):
if dtype == 'O':
return decompress_text(bytes)
else:
return decompress_bytes(bytes)
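# ----------------------------------------------------------------------------
# Illustrative usage sketch (editorial addition, not part of partd): append
# arrays under a key and read back their concatenation. It assumes the
# standard Interface append/get API and uses a temporary on-disk File store.
if __name__ == '__main__':
    with Numpy() as p:
        p.append({'x': np.array([1, 2, 3])})
        p.append({'x': np.array([4, 5])})
        print(p.get('x'))  # expected: array([1, 2, 3, 4, 5])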
| bsd-3-clause |
detrout/debian-statsmodels | statsmodels/graphics/mosaicplot.py | 2 | 26684 | """Create a mosaic plot from a contingency table.
It allows one to visualize multivariate categorical data in a rigorous
and informative way.
See the docstring of the mosaic function for more information.
"""
# Author: Enrico Giampieri - 21 Jan 2013
from __future__ import division
from statsmodels.compat.python import (iteritems, iterkeys, lrange, string_types, lzip,
itervalues, zip, range)
import numpy as np
from statsmodels.compat.collections import OrderedDict
from itertools import product
from numpy import iterable, r_, cumsum, array
from statsmodels.graphics import utils
__all__ = ["mosaic"]
def _normalize_split(proportion):
"""
    Return a list of proportions of the available space given the division.
    If only a number is given, it will assume a split in two pieces.
"""
if not iterable(proportion):
if proportion == 0:
proportion = array([0.0, 1.0])
elif proportion >= 1:
proportion = array([1.0, 0.0])
elif proportion < 0:
            raise ValueError("proportions should be positive, "
                             "given value: {}".format(proportion))
else:
proportion = array([proportion, 1.0 - proportion])
proportion = np.asarray(proportion, dtype=float)
if np.any(proportion < 0):
        raise ValueError("proportions should be positive, "
                         "given value: {}".format(proportion))
if np.allclose(proportion, 0):
        raise ValueError("at least one proportion should be "
                         "greater than zero, given value: {}".format(proportion))
# ok, data are meaningful, so go on
if len(proportion) < 2:
return array([0.0, 1.0])
left = r_[0, cumsum(proportion)]
left /= left[-1] * 1.0
return left
def _split_rect(x, y, width, height, proportion, horizontal=True, gap=0.05):
"""
    Split the given rectangle into n segments whose proportions are specified
    along the given axis. If a gap is inserted, the segments will be separated
    by a certain amount of space, retaining the relative proportion between
    them. A gap of 1 corresponds to a plot that is half void, and the
    remaining half of the space is proportionally divided among the pieces.
"""
x, y, w, h = float(x), float(y), float(width), float(height)
if (w < 0) or (h < 0):
        raise ValueError("dimension of the square less than "
                         "zero w={} h={}".format(w, h))
proportions = _normalize_split(proportion)
# extract the starting point and the dimension of each subdivision
# in respect to the unit square
starting = proportions[:-1]
amplitude = proportions[1:] - starting
# how much each extrema is going to be displaced due to gaps
starting += gap * np.arange(len(proportions) - 1)
# how much the squares plus the gaps are extended
extension = starting[-1] + amplitude[-1] - starting[0]
# normalize everything for fit again in the original dimension
starting /= extension
amplitude /= extension
# bring everything to the original square
starting = (x if horizontal else y) + starting * (w if horizontal else h)
amplitude = amplitude * (w if horizontal else h)
# create each 4-tuple for each new block
results = [(s, y, a, h) if horizontal else (x, s, w, a)
for s, a in zip(starting, amplitude)]
return results
def _reduce_dict(count_dict, partial_key):
"""
Make partial sum on a counter dict.
Given a match for the beginning of the category, it will sum each value.
"""
L = len(partial_key)
count = sum(v for k, v in iteritems(count_dict) if k[:L] == partial_key)
return count
def _key_splitting(rect_dict, keys, values, key_subset, horizontal, gap):
"""
    Given a dictionary where each entry is a rectangle, plus a list of keys
    and values (counts of elements in each category), it splits each rect
    accordingly, as long as the key starts with the tuple key_subset. The
    other keys are returned without modification.
"""
result = OrderedDict()
L = len(key_subset)
for name, (x, y, w, h) in iteritems(rect_dict):
if key_subset == name[:L]:
# split base on the values given
divisions = _split_rect(x, y, w, h, values, horizontal, gap)
for key, rect in zip(keys, divisions):
result[name + (key,)] = rect
else:
result[name] = (x, y, w, h)
return result
def _tuplify(obj):
    """Convert an object into a tuple of strings (even if it is not iterable,
    like a single integer number), keeping whole strings intact rather than
    splitting them into characters.
"""
if np.iterable(obj) and not isinstance(obj, string_types):
res = tuple(str(o) for o in obj)
else:
res = (str(obj),)
return res
def _categories_level(keys):
    """Use an OrderedDict to implement a simple ordered set.
    Return each level of each category, as
[[key_1_level_1,key_2_level_1],[key_1_level_2,key_2_level_2]]
"""
res = []
for i in zip(*(keys)):
tuplefied = _tuplify(i)
res.append(list(OrderedDict([(j, None) for j in tuplefied])))
return res
def _hierarchical_split(count_dict, horizontal=True, gap=0.05):
"""
Split a square in a hierarchical way given a contingency table.
Hierarchically split the unit square in alternate directions
in proportion to the subdivision contained in the contingency table
    count_dict. This is the function that actually performs the tiling
for the creation of the mosaic plot. If the gap array has been specified
it will insert a corresponding amount of space (proportional to the
    unit length), while retaining the proportionality of the tiles.
Parameters
----------
count_dict : dict
Dictionary containing the contingency table.
Each category should contain a non-negative number
        with a tuple as index. It expects all the combinations
        of keys to be represented; if that is not true, it will
        automatically consider the missing values as 0.
horizontal : bool
The starting direction of the split (by default along
the horizontal axis)
gap : float or array of floats
The list of gaps to be applied on each subdivision.
        If the length of the given array is less than the number
        of subcategories (or if it's a single number) it will extend
        it with exponentially decreasing gaps.
Returns
----------
base_rect : dict
A dictionary containing the result of the split.
To each key is associated a 4-tuple of coordinates
that are required to create the corresponding rectangle:
0 - x position of the lower left corner
1 - y position of the lower left corner
2 - width of the rectangle
3 - height of the rectangle
"""
# this is the unit square that we are going to divide
base_rect = OrderedDict([(tuple(), (0, 0, 1, 1))])
# get the list of each possible value for each level
categories_levels = _categories_level(list(iterkeys(count_dict)))
L = len(categories_levels)
# recreate the gaps vector starting from an int
if not np.iterable(gap):
gap = [gap / 1.5 ** idx for idx in range(L)]
# extend if it's too short
if len(gap) < L:
last = gap[-1]
gap = list(gap) + [last / 1.5 ** idx for idx in range(L)]
# trim if it's too long
gap = gap[:L]
# put the count dictionary in order for the keys
# this will allow some code simplification
count_ordered = OrderedDict([(k, count_dict[k])
for k in list(product(*categories_levels))])
for cat_idx, cat_enum in enumerate(categories_levels):
# get the partial key up to the actual level
base_keys = list(product(*categories_levels[:cat_idx]))
for key in base_keys:
# for each partial key and each value calculate how many
# observations we have in the counting dictionary
part_count = [_reduce_dict(count_ordered, key + (partial,))
for partial in cat_enum]
# reduce the gap for subsequent levels
new_gap = gap[cat_idx]
# split the given subkeys in the rectangle dictionary
base_rect = _key_splitting(base_rect, cat_enum, part_count, key,
horizontal, new_gap)
horizontal = not horizontal
return base_rect
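# Illustrative sketch (hypothetical 2x2 contingency table, no gap): the unit
# square is first cut along x for the outer category and then along y for the
# inner one, giving one (x, y, width, height) tuple per full key.
#   >>> counts = {('a', 'x'): 1, ('a', 'y'): 1, ('b', 'x'): 1, ('b', 'y'): 1}
#   >>> tiles = _hierarchical_split(counts, horizontal=True, gap=0.0)
#   >>> sorted(tiles)      # -> [('a', 'x'), ('a', 'y'), ('b', 'x'), ('b', 'y')]
#   >>> tiles[('a', 'x')]  # -> roughly (0, 0, 0.5, 0.5), the lower-left quarter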
def _single_hsv_to_rgb(hsv):
"""Transform a color from the hsv space to the rgb."""
from matplotlib.colors import hsv_to_rgb
return hsv_to_rgb(array(hsv).reshape(1, 1, 3)).reshape(3)
def _create_default_properties(data):
""""Create the default properties of the mosaic given the data
first it will varies the color hue (first category) then the color
saturation (second category) and then the color value
(third category). If a fourth category is found, it will put
decoration on the rectangle. Doesn't manage more than four
level of categories
"""
categories_levels = _categories_level(list(iterkeys(data)))
Nlevels = len(categories_levels)
# first level, the hue
L = len(categories_levels[0])
# hue = np.linspace(1.0, 0.0, L+1)[:-1]
hue = np.linspace(0.0, 1.0, L + 2)[:-2]
# second level, the saturation
L = len(categories_levels[1]) if Nlevels > 1 else 1
saturation = np.linspace(0.5, 1.0, L + 1)[:-1]
# third level, the value
L = len(categories_levels[2]) if Nlevels > 2 else 1
value = np.linspace(0.5, 1.0, L + 1)[:-1]
# fourth level, the hatch
L = len(categories_levels[3]) if Nlevels > 3 else 1
hatch = ['', '/', '-', '|', '+'][:L + 1]
# convert in list and merge with the levels
hue = lzip(list(hue), categories_levels[0])
saturation = lzip(list(saturation),
categories_levels[1] if Nlevels > 1 else [''])
value = lzip(list(value),
categories_levels[2] if Nlevels > 2 else [''])
hatch = lzip(list(hatch),
categories_levels[3] if Nlevels > 3 else [''])
# create the properties dictionary
properties = {}
for h, s, v, t in product(hue, saturation, value, hatch):
hv, hn = h
sv, sn = s
vv, vn = v
tv, tn = t
level = (hn,) + ((sn,) if sn else tuple())
level = level + ((vn,) if vn else tuple())
level = level + ((tn,) if tn else tuple())
hsv = array([hv, sv, vv])
prop = {'color': _single_hsv_to_rgb(hsv), 'hatch': tv, 'lw': 0}
properties[level] = prop
return properties
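# Illustrative sketch (hypothetical one-level data): with a single category
# only the hue varies, and the property keys are the 1-tuples of that level,
# each mapping to a dict with 'color', 'hatch' and 'lw' entries.
#   >>> props = _create_default_properties({('a',): 1, ('b',): 2})
#   >>> sorted(props)           # -> [('a',), ('b',)]
#   >>> sorted(props[('a',)])   # -> ['color', 'hatch', 'lw']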
def _normalize_data(data, index):
"""normalize the data to a dict with tuples of strings as keys
right now it works with:
0 - dictionary (or equivalent mappable)
1 - pandas.Series with simple or hierarchical indexes
2 - numpy.ndarrays
3 - everything that can be converted to a numpy array
4 - pandas.DataFrame (via the _normalize_dataframe function)
"""
# if data is a dataframe we need to take a completely new road
# before coming back here. Use the hasattr to avoid importing
# pandas explicitly
if hasattr(data, 'pivot') and hasattr(data, 'groupby'):
data = _normalize_dataframe(data, index)
index = None
# can it be used as a dictionary?
try:
items = list(iteritems(data))
except AttributeError:
# ok, I cannot use the data as a dictionary
# Try to convert it to a numpy array, or die trying
data = np.asarray(data)
temp = OrderedDict()
for idx in np.ndindex(data.shape):
name = tuple(i for i in idx)
temp[name] = data[idx]
data = temp
items = list(iteritems(data))
# make all the keys a tuple, even if simple numbers
data = OrderedDict([_tuplify(k), v] for k, v in items)
categories_levels = _categories_level(list(iterkeys(data)))
# fill the void in the counting dictionary
indexes = product(*categories_levels)
contingency = OrderedDict([(k, data.get(k, 0)) for k in indexes])
data = contingency
# reorder the keys order according to the one specified by the user
# or if the index is None convert it into a simple list
# right now it doesn't do any check, but can be modified in the future
index = lrange(len(categories_levels)) if index is None else index
contingency = OrderedDict()
for key, value in iteritems(data):
new_key = tuple(key[i] for i in index)
contingency[new_key] = value
data = contingency
return data
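# Illustrative sketch (hypothetical inputs): a plain array becomes an ordered
# dict keyed by tuples of stringified indices, while a dict just gets its keys
# tuplified; missing key combinations would be filled with 0.
#   >>> _normalize_data({'a': 5, 'b': 7}, None)
#   -> OrderedDict([(('a',), 5), (('b',), 7)])
#   >>> list(_normalize_data(np.array([[1, 2], [3, 4]]), None))
#   -> [('0', '0'), ('0', '1'), ('1', '0'), ('1', '1')]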
def _normalize_dataframe(dataframe, index):
"""Take a pandas DataFrame and count the element present in the
given columns, return a hierarchical index on those columns
"""
#groupby the given keys, extract the same columns and count the element
# then collapse them with a mean
data = dataframe[index].dropna()
grouped = data.groupby(index, sort=False)
counted = grouped[index].count()
averaged = counted.mean(axis=1)
return averaged
def _statistical_coloring(data):
"""evaluate colors from the indipendence properties of the matrix
It will encounter problem if one category has all zeros
"""
data = _normalize_data(data, None)
categories_levels = _categories_level(list(iterkeys(data)))
Nlevels = len(categories_levels)
total = 1.0 * sum(v for v in itervalues(data))
# count the proportion of observation
# for each level that has the given name
# at each level
levels_count = []
for level_idx in range(Nlevels):
proportion = {}
for level in categories_levels[level_idx]:
proportion[level] = 0.0
for key, value in iteritems(data):
if level == key[level_idx]:
proportion[level] += value
proportion[level] /= total
levels_count.append(proportion)
# for each key I obtain the expected value
# and its standard deviation from a binomial distribution
# under the hypothesis of independence
expected = {}
for key, value in iteritems(data):
base = 1.0
for i, k in enumerate(key):
base *= levels_count[i][k]
expected[key] = base * total, np.sqrt(total * base * (1.0 - base))
# now we have the standard deviation of distance from the
# expected value for each tile. We create the colors from this
sigmas = dict((k, (data[k] - m) / s) for k, (m, s) in iteritems(expected))
props = {}
for key, dev in iteritems(sigmas):
red = 0.0 if dev < 0 else (dev / (1 + dev))
blue = 0.0 if dev > 0 else (dev / (-1 + dev))
green = (1.0 - red - blue) / 2.0
hatch = 'x' if dev > 2 else 'o' if dev < -2 else ''
props[key] = {'color': [red, green, blue], 'hatch': hatch}
return props
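# Illustrative sketch (hypothetical 2x2 table): under independence the
# expected count for a key is total * p_i * p_j, with standard deviation
# sqrt(total * p * (1 - p)) where p = p_i * p_j; tiles deviating by more than
# about 2 sigma are pushed towards red (positive) or blue (negative).
#   >>> data = {('a', 'x'): 20, ('a', 'y'): 10, ('b', 'x'): 10, ('b', 'y'): 10}
#   >>> props = _statistical_coloring(data)
#   >>> sorted(props[('a', 'x')])   # -> ['color', 'hatch']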
def _create_labels(rects, horizontal, ax, rotation):
"""find the position of the label for each value of each category
right now it supports only up to four categories
ax: the axis on which the label should be applied
rotation: the rotation list for each side
"""
categories = _categories_level(list(iterkeys(rects)))
if len(categories) > 4:
msg = ("maximum of 4 level supported for axes labeling..and 4"
"is alreay a lot of level, are you sure you need them all?")
raise NotImplementedError(msg)
labels = {}
#keep it fixed as will be used a lot of times
items = list(iteritems(rects))
vertical = not horizontal
#get the axis ticks and labels locator to put the correct values!
ax2 = ax.twinx()
ax3 = ax.twiny()
#this is the order of execution for horizontal disposition
ticks_pos = [ax.set_xticks, ax.set_yticks, ax3.set_xticks, ax2.set_yticks]
ticks_lab = [ax.set_xticklabels, ax.set_yticklabels,
ax3.set_xticklabels, ax2.set_yticklabels]
#for the vertical one, rotate it by one
if vertical:
ticks_pos = ticks_pos[1:] + ticks_pos[:1]
ticks_lab = ticks_lab[1:] + ticks_lab[:1]
#clean them
for pos, lab in zip(ticks_pos, ticks_lab):
pos([])
lab([])
#for each level, for each value in the level, take the mean of all
#the sublevel that correspond to that partial key
for level_idx, level in enumerate(categories):
#this dictionary keep the labels only for this level
level_ticks = dict()
for value in level:
#to which level it should refer to get the preceding
#values of labels? it's rather a tricky question...
#this is dependent on the side. It's a very crude management
#but I couldn't think of a more general way...
if horizontal:
if level_idx == 3:
index_select = [-1, -1, -1]
else:
index_select = [+0, -1, -1]
else:
if level_idx == 3:
index_select = [+0, -1, +0]
else:
index_select = [-1, -1, -1]
#now I create the base key name and append the current value
#It will search on all the rects to find the corresponding one
#and use them to evaluate the mean position
basekey = tuple(categories[i][index_select[i]]
for i in range(level_idx))
basekey = basekey + (value,)
subset = dict((k, v) for k, v in items
if basekey == k[:level_idx + 1])
#now I extract the center of all the tiles and make a weighted
#mean of all these center on the area of the tile
#this should give me the (more or less) correct position
#of the center of the category
vals = list(itervalues(subset))
W = sum(w * h for (x, y, w, h) in vals)
x_lab = sum((x + w / 2.0) * w * h / W for (x, y, w, h) in vals)
y_lab = sum((y + h / 2.0) * w * h / W for (x, y, w, h) in vals)
#now base on the ordering, select which position to keep
#needs to be written in a more general form, or are 4 levels enough?
#should give also the horizontal and vertical alignment
side = (level_idx + vertical) % 4
level_ticks[value] = y_lab if side % 2 else x_lab
#now we add the labels of this level to the correct axis
ticks_pos[level_idx](list(itervalues(level_ticks)))
ticks_lab[level_idx](list(iterkeys(level_ticks)),
rotation=rotation[level_idx])
return labels
def mosaic(data, index=None, ax=None, horizontal=True, gap=0.005,
properties=lambda key: None, labelizer=None,
title='', statistic=False, axes_label=True,
label_rotation=0.0):
"""Create a mosaic plot from a contingency table.
It allows one to visualize multivariate categorical data in a rigorous
and informative way.
Parameters
----------
data : dict, pandas.Series, np.ndarray, pandas.DataFrame
The contingency table that contains the data.
Each category should contain a non-negative number
with a tuple as index. It expects all the combinations
of keys to be represented; if that is not true, the missing
values will automatically be considered as 0. The order
of the keys will be the same as the one of insertion.
If a dict or a Series (or any other dict-like object)
is used, it will take the keys as labels. If a
np.ndarray is provided, it will generate simple
numerical labels.
index: list, optional
Gives the preferred order for the category ordering. If not specified
will default to the given order. It doesn't support named indexes
for hierarchical Series. If a DataFrame is provided, it expects
a list with the name of the columns.
ax : matplotlib.Axes, optional
The axes on which to display the mosaic. If not given, a new
figure will be created
horizontal : bool, optional (default True)
The starting direction of the split (by default along
the horizontal axis)
gap : float or array of floats
The list of gaps to be applied on each subdivision.
If the length of the given array is less than the number
of subcategories (or if it's a single number) it will be
extended with exponentially decreasing gaps
labelizer : function (key) -> string, optional
A function that generates the text to display at the center of
each tile, based on the key of that tile
properties : function (key) -> dict, optional
A function that, for each tile in the mosaic, takes the key
of the tile and returns the dictionary of properties
of the generated Rectangle, like color, hatch or similar.
A default properties set will be provided for the keys whose
color has not been defined, and will use color variations to help
visually separate the various categories. It should return None
to indicate that the default property for the tile should be used.
A dictionary of the properties for each key can also be passed,
and it will be internally converted to the correct function
statistic: bool, optional (default False)
If True, a crude statistical model is used to give colors to the plot.
If the tile has a count that is more than 2 standard deviations
from the expected value under the independence hypothesis, it will
go from green to red (for positive deviations, blue otherwise) and
will acquire a hatching when it crosses the 3 sigma threshold.
title: string, optional
The title of the axis
axes_label: boolean, optional
Show the name of each value of each category
on the axis (default) or hide them.
label_rotation: float or list of float
the rotation of the axis label (if present). If a list is given
each axis can have a different rotation
Returns
----------
fig : matplotlib.Figure
The generated figure
rects : dict
A dictionary with the same keys as the original
dataset, holding a reference to the coordinates of each
tile and the Rectangle that represents it
See Also
----------
A Brief History of the Mosaic Display
Michael Friendly, York University, Psychology Department
Journal of Computational and Graphical Statistics, 2001
Mosaic Displays for Loglinear Models.
Michael Friendly, York University, Psychology Department
Proceedings of the Statistical Graphics Section, 1992, 61-68.
Mosaic displays for multi-way contingency tables.
Michael Friendly, York University, Psychology Department
Journal of the american statistical association
March 1994, Vol. 89, No. 425, Theory and Methods
Examples
----------
The most simple use case is to take a dictionary and plot the result
>>> data = {'a': 10, 'b': 15, 'c': 16}
>>> mosaic(data, title='basic dictionary')
>>> pylab.show()
A more useful example is given by a dictionary with multiple indices.
In this case we use a wider gap for a better visual separation of the
resulting plot
>>> data = {('a', 'b'): 1, ('a', 'c'): 2, ('d', 'b'): 3, ('d', 'c'): 4}
>>> mosaic(data, gap=0.05, title='complete dictionary')
>>> pylab.show()
The same data can be given as a simple or hierarchical indexed Series
>>> rand = np.random.random
>>> from itertools import product
>>>
>>> tuples = list(product(['bar', 'baz', 'foo', 'qux'], ['one', 'two']))
>>> index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
>>> data = pd.Series(rand(8), index=index)
>>> mosaic(data, title='hierarchical index series')
>>> pylab.show()
The third accepted data structure is the np array, for which a
very simple index will be created.
>>> rand = np.random.random
>>> data = 1+rand((2,2))
>>> mosaic(data, title='random non-labeled array')
>>> pylab.show()
If you need to modify the labeling and the coloring you can give
a function to create the labels and one with the graphical properties
starting from the key tuple
>>> data = {'a': 10, 'b': 15, 'c': 16}
>>> props = lambda key: {'color': 'r' if 'a' in key else 'gray'}
>>> labelizer = lambda k: {('a',): 'first', ('b',): 'second',
('c',): 'third'}[k]
>>> mosaic(data, title='colored dictionary',
properties=props, labelizer=labelizer)
>>> pylab.show()
Using a DataFrame as source, specifying the name of the columns of interest
>>> gender = ['male', 'male', 'male', 'female', 'female', 'female']
>>> pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
>>> data = pandas.DataFrame({'gender': gender, 'pet': pet})
>>> mosaic(data, ['pet', 'gender'])
>>> pylab.show()
"""
from pylab import Rectangle
fig, ax = utils.create_mpl_ax(ax)
# normalize the data to a dict with tuple of strings as keys
data = _normalize_data(data, index)
# split the graph into different areas
rects = _hierarchical_split(data, horizontal=horizontal, gap=gap)
# if there is no specified way to create the labels
# create a default one
if labelizer is None:
labelizer = lambda k: "\n".join(k)
if statistic:
default_props = _statistical_coloring(data)
else:
default_props = _create_default_properties(data)
if isinstance(properties, dict):
color_dict = properties
properties = lambda key: color_dict.get(key, None)
for k, v in iteritems(rects):
# create each rectangle and put a label on it
x, y, w, h = v
conf = properties(k)
props = conf if conf else default_props[k]
text = labelizer(k)
Rect = Rectangle((x, y), w, h, label=text, **props)
ax.add_patch(Rect)
ax.text(x + w / 2, y + h / 2, text, ha='center',
va='center', size='smaller')
#creating the labels on the axis
#or clearing them
if axes_label:
if np.iterable(label_rotation):
rotation = label_rotation
else:
rotation = [label_rotation] * 4
labels = _create_labels(rects, horizontal, ax, rotation)
else:
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_yticklabels([])
ax.set_title(title)
return fig, rects | bsd-3-clause |
lweasel/piquant | piquant/resource_usage.py | 1 | 3014 | import math
import pandas as pd
import os.path
PREQUANT_RESOURCE_TYPE = "prequant_usage"
QUANT_RESOURCE_TYPE = "quant_usage"
OVERALL_USAGE_PREFIX = "overall"
TIME_USAGE_TYPE = "time"
MEMORY_USAGE_TYPE = "memory"
class _ResourceUsageStatistic(object):
def __init__(self, name, usage_type, title,
format_string, units, value_extractor):
self.name = name
self.usage_type = usage_type
self.title = title
self.format_string = format_string
self.units = units
self.value_extractor = value_extractor
def get_value(self, usage_df):
return self.value_extractor(usage_df[self.name])
def stat_range(self, vals_range):
max_val = math.ceil(vals_range[1] * 2) / 2.0
return (0, max_val + 0.01)
def get_axis_label(self):
return "{t} ({u})".format(t=self.title, u=self.units)
_RESOURCE_USAGE_STATS = []
_RESOURCE_USAGE_STATS.append(_ResourceUsageStatistic(
"real-time", TIME_USAGE_TYPE,
"Log10 total elapsed real time", "%e", "s",
lambda x: math.log10(x.sum())))
_RESOURCE_USAGE_STATS.append(_ResourceUsageStatistic(
"user-time", TIME_USAGE_TYPE,
"Log10 total user mode time", "%U", "s",
lambda x: math.log10(x.sum())))
_RESOURCE_USAGE_STATS.append(_ResourceUsageStatistic(
"sys-time", TIME_USAGE_TYPE,
"Log10 total kernel mode time", "%S", "s",
lambda x: math.log10(x.sum())))
_RESOURCE_USAGE_STATS.append(_ResourceUsageStatistic(
"max-memory", MEMORY_USAGE_TYPE,
"Maximum resident memory", "%M", "Gb",
lambda x: x.max() / 1048576.0))
def get_resource_usage_statistics():
return set(_RESOURCE_USAGE_STATS)
def get_time_usage_statistics():
return [rus for rus in _RESOURCE_USAGE_STATS
if rus.usage_type == TIME_USAGE_TYPE]
def get_memory_usage_statistics():
return [rus for rus in _RESOURCE_USAGE_STATS
if rus.usage_type == MEMORY_USAGE_TYPE]
def get_time_command(resource_type):
output_file = get_resource_usage_file(resource_type)
format_string = ",".join(
[rus.format_string for rus in _RESOURCE_USAGE_STATS])
return ("/usr/bin/time -f \"\\\"%C\\\",{format_string}\" " +
"-o {output_file} -a ").format(
format_string=format_string, output_file=output_file)
def get_usage_summary(usage_file):
usage_info = pd.read_csv(
usage_file, header=None,
names=["command"] + [rus.name for rus in _RESOURCE_USAGE_STATS])
return pd.DataFrame([
{rus.name: rus.get_value(usage_info) for rus in _RESOURCE_USAGE_STATS}
])
def get_resource_usage_file(resource_type, prefix=None, directory=None):
file_name = resource_type + ".csv"
if prefix:
file_name = prefix + "_" + file_name
if directory:
file_name = os.path.join(directory, file_name)
return file_name
def write_usage_summary(usage_file_name, usage_summary):
with open(usage_file_name, "w") as out_file:
usage_summary.to_csv(out_file, index=False)
| mit |
kirangonella/BuildingMachineLearningSystemsWithPython | ch11/demo_corr.py | 25 | 2288 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
from matplotlib import pylab
import numpy as np
import scipy
from scipy.stats import norm, pearsonr
from utils import CHART_DIR
def _plot_correlation_func(x, y):
r, p = pearsonr(x, y)
title = "Cor($X_1$, $X_2$) = %.3f" % r
pylab.scatter(x, y)
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
f1 = scipy.poly1d(scipy.polyfit(x, y, 1))
pylab.plot(x, f1(x), "r--", linewidth=2)
# pylab.xticks([w*7*24 for w in [0,1,2,3,4]], ['week %i'%(w+1) for w in
# [0,1,2,3,4]])
def plot_correlation_demo():
np.random.seed(0) # to reproduce the data later on
pylab.clf()
pylab.figure(num=None, figsize=(8, 8))
x = np.arange(0, 10, 0.2)
pylab.subplot(221)
y = 0.5 * x + norm.rvs(1, scale=.01, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(222)
y = 0.5 * x + norm.rvs(1, scale=.1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(223)
y = 0.5 * x + norm.rvs(1, scale=1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(224)
y = norm.rvs(1, scale=10, size=len(x))
_plot_correlation_func(x, y)
pylab.autoscale(tight=True)
pylab.grid(True)
filename = "corr_demo_1.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
pylab.clf()
pylab.figure(num=None, figsize=(8, 8))
x = np.arange(-5, 5, 0.2)
pylab.subplot(221)
y = 0.5 * x ** 2 + norm.rvs(1, scale=.01, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(222)
y = 0.5 * x ** 2 + norm.rvs(1, scale=.1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(223)
y = 0.5 * x ** 2 + norm.rvs(1, scale=1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(224)
y = 0.5 * x ** 2 + norm.rvs(1, scale=10, size=len(x))
_plot_correlation_func(x, y)
pylab.autoscale(tight=True)
pylab.grid(True)
filename = "corr_demo_2.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_correlation_demo()
| mit |
mjgrav2001/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
zhuhuifeng/PyML | examples/pca.py | 1 | 1375 | try:
from sklearn.model_selection import train_test_split
except ImportError:
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification
from mla.linear_models import LogisticRegression
from mla.metrics import accuracy
from mla.pca import PCA
# logging.basicConfig(level=logging.DEBUG)
# Generate a random binary classification problem.
X, y = make_classification(n_samples=1000, n_features=100, n_informative=75,
random_state=1111, n_classes=2, class_sep=2.5, )
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
random_state=1111)
for s in ['svd', 'eigen']:
p = PCA(15, solver=s)
# fit PCA with training data, not entire dataset
p.fit(X_train)
X_train_reduced = p.transform(X_train)
X_test_reduced = p.transform(X_test)
model = LogisticRegression(lr=0.001, max_iters=2500)
model.fit(X_train_reduced, y_train)
predictions = model.predict(X_test_reduced)
print('Classification accuracy for %s PCA: %s'
% (s, accuracy(y_test, predictions)))
model = LogisticRegression(lr=0.001, max_iters=2500)
model.fit(X_train, y_train)
predictions = model.predict(X_test)
print('Classification accuracy for %s PCA: %s'
% (s, accuracy(y_test, predictions)))
| apache-2.0 |
pieleric/odemis | src/odemis/util/img.py | 2 | 47211 | # -*- coding: utf-8 -*-
"""
Created on 23 Aug 2012
@author: Éric Piel
Copyright © 2012-2013 Éric Piel & Kimon Tsitsikas, Delmic
This file is part of Odemis.
Odemis is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License version 2 as published by the Free
Software Foundation.
Odemis is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
Odemis. If not, see http://www.gnu.org/licenses/.
"""
# various functions to convert and modify images (as DataArray)
from __future__ import division
import logging
import math
import numpy
from odemis import model
import scipy.ndimage
import cv2
import copy
from odemis.model import DataArray
from odemis.model import MD_DWELL_TIME, MD_EXP_TIME, TINT_FIT_TO_RGB, TINT_RGB_AS_IS
from odemis.util import get_best_dtype_for_acc
from odemis.util.conversion import get_img_transformation_matrix, rgb_to_frgb
import matplotlib.colors as colors
from matplotlib import cm
# See if the optimised (cython-based) functions are available
try:
from odemis.util import img_fast
except ImportError:
logging.warn("Failed to load optimised functions, slow version will be used.")
img_fast = None
# This is a weave-based optimised version (but weave requires g++ installed)
#def DataArray2RGB_fast(data, irange, tint=(255, 255, 255)):
# """
# Do not call directly, use DataArray2RGB.
# Fast version of DataArray2RGB, which is based on C code
# """
# # we use weave to do the assignment in C code
# # this only gets compiled on the first call
# import scipy.weave as weave
# # ensure it's a basic ndarray, otherwise it confuses weave
# data = data.view(numpy.ndarray)
# w, h = data.shape
# ret = numpy.empty((w, h, 3), dtype=numpy.uint8)
# assert irange[0] < irange[1]
# irange = numpy.array(irange, dtype=data.dtype) # ensure it's the same type
# tintr = numpy.array([t / 255 for t in tint], dtype=numpy.float)
#
# # TODO: special code when tint == white (should be 2x faster)
# code = """
# int impos=0;
# int retpos=0;
# float b = 255. / float(irange[1] - irange[0]);
# float d;
# for(int j=0; j<Ndata[1]; j++)
# {
# for (int i=0; i<Ndata[0]; i++)
# {
# // clip
# if (data[impos] <= irange[0]) {
# d = 0;
# } else if (data[impos] >= irange[1]) {
# d = 255;
# } else {
# d = float(data[impos] - irange[0]) * b;
# }
# // Note: can go x2 faster if tintr is skipped
# ret[retpos++] = d * tintr[0];
# ret[retpos++] = d * tintr[1];
# ret[retpos++] = d * tintr[2];
# impos++;
# }
# }
# """
# weave.inline(code, ["data", "ret", "irange", "tintr"])
# return ret
def tint_to_md_format(tint):
"""
Given a tint of a stream, which could be an RGB tuple or colormap object,
put it into the format for metadata storage
tint argument can be:
- a list tuple RGB value (for a tint) or
- a matplotlib.colors.Colormap object for a custom color map or
- a string of value TINT_FIT_TO_RGB to indicate fit RGB color mapping
- a string of value TINT_RGB_AS_IS that indicates no tint. Will be converted to a black tint
returns (string or tuple) the tint name for metadata
"""
if isinstance(tint, tuple) or isinstance(tint, list):
return tint
elif isinstance(tint, colors.Colormap):
return tint.name
elif tint in (TINT_FIT_TO_RGB, TINT_RGB_AS_IS):
return tint
else:
raise ValueError("Unexpected tint %s" % (tint,))
def md_format_to_tint(user_tint):
"""
Given a string or tuple value of user_tint in saved metadata, convert to a tint object
Returns tint as:
- a list tuple RGB value (for a tint)
- a matplotlib.colors.Colormap object for a custom color map
- a string of value TINT_FIT_TO_RGB to indicate fit RGB color mapping
"""
if isinstance(user_tint, tuple) or isinstance(user_tint, list):
return user_tint
elif isinstance(user_tint, str):
if user_tint != TINT_FIT_TO_RGB:
try:
return cm.get_cmap(user_tint)
except ValueError:
raise ValueError("Invalid tint metadata colormap value %s" % (user_tint,))
else:
return TINT_FIT_TO_RGB
else:
raise TypeError("Invalid tint metadata type %s" % (user_tint,))
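# Illustrative sketch (example values): a plain RGB tint survives the
# round-trip unchanged, while a colormap is stored by name and restored as a
# matplotlib Colormap object ("viridis" is just an example name).
#   >>> tint_to_md_format((0, 255, 0))   # -> (0, 255, 0)
#   >>> md_format_to_tint(tint_to_md_format(cm.get_cmap("viridis"))).name
#   # -> 'viridis'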
def findOptimalRange(hist, edges, outliers=0):
"""
Find the intensity range fitting best an image based on the histogram.
hist (ndarray 1D of 0<=int): histogram
edges (tuple of 2 numbers): the values corresponding to the first and last
bin of the histogram. To get an index, use edges = (0, len(hist)).
outliers (0<float<0.5): ratio of outliers to discard (on both sides). 0
discards no value, 0.5 discards every value (and so returns the median).
return (tuple of 2 values): the range (min and max values)
"""
# If we got an histogram with only one value, don't try too hard.
if len(hist) < 2:
return edges
if outliers == 0:
# short-cut if no outliers: find first and last non null value
inz = numpy.flatnonzero(hist)
try:
idxrng = inz[0], inz[-1]
except IndexError:
# No non-zero => data had no value => histogram of an empty array
return edges
else:
# accumulate each bin into the next bin
cum_hist = hist.cumsum()
nval = cum_hist[-1]
# If it's an histogram of an empty array, don't try too hard.
if nval == 0:
return edges
# trick: if there are lots (>1%) of complete black and not a single
# value just above it, it's a sign that the black is not part of the
# signal and so is all outliers
if hist[1] == 0 and cum_hist[0] / nval > 0.01 and cum_hist[0] < nval:
cum_hist -= cum_hist[0] # don't count 0's in the outliers
nval = cum_hist[-1]
# find out how much is the value corresponding to outliers
oval = int(round(outliers * nval))
lowv, highv = oval, nval - oval
# search for first bin equal or above lowv
lowi = numpy.searchsorted(cum_hist, lowv, side="right")
if hist[lowi] == lowv:
# if exactly lowv -> remove this bin too, otherwise include the bin
lowi += 1
# same with highv (note: it's always found, so highi is always
# within hist)
highi = numpy.searchsorted(cum_hist, highv, side="left")
idxrng = lowi, highi
# convert index into intensity values
a = edges[0]
b = (edges[1] - edges[0]) / (hist.size - 1)
# TODO: rng should be the same type as edges
rng = (a + b * idxrng[0], a + b * idxrng[1])
return rng
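# Illustrative sketch (hypothetical histogram of 8 bins over values 0..7):
# with no outliers the returned range simply spans the first and last
# non-empty bins, mapped back onto the edge values.
#   >>> hist = numpy.array([0, 0, 4, 10, 2, 0, 0, 0])
#   >>> findOptimalRange(hist, (0, 7))   # -> (2.0, 4.0)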
def getOutliers(data, outliers=0):
"""
Finds the minimum and maximum values when discarding a given percentage of outliers.
:param data: (DataArray) The data containing the image.
:param outliers: (0<float<0.5) Ratio of outliers to discard (on each side).
0 discards no value, 0.5 discards every value (and so returns the median).
:return: (tuple of 2 values) The range (min and max value).
"""
hist, edges = histogram(data)
return findOptimalRange(hist, edges, outliers)
def compactHistogram(hist, length):
"""
Make a histogram smaller by summing bins together
hist (ndarray 1D of 0<=int): histogram
length (0<int<=hist.size): final length required. It must be a multiple of
the length of hist
return (ndarray 1D of 0<=int): histogram representing the same bins, but
accumulated together as necessary to only have "length" bins.
"""
if hist.size < length:
raise ValueError("Cannot compact histogram of length %d to length %d" %
hist.size, length)
elif hist.size == length:
return hist
elif hist.size % length != 0:
# Very costly (in CPU time) and probably a sign something went wrong
logging.warning("Length of histogram = %d, not multiple of %d",
hist.size, length)
# add enough zeros at the end to make it a multiple
hist = numpy.append(hist, numpy.zeros(length - hist.size % length, dtype=hist.dtype))
# Reshape to have on first axis the length, and second axis the bins which
# must be accumulated.
chist = hist.reshape(length, hist.size // length)
return numpy.sum(chist, 1)
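# Illustrative sketch (hypothetical 8-bin histogram): compacting to 4 bins
# sums each pair of adjacent bins.
#   >>> compactHistogram(numpy.array([1, 2, 3, 4, 5, 6, 7, 8]), 4)
#   # -> array([ 3,  7, 11, 15])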
# TODO: compute histogram faster. There are several ways:
# * x=numpy.bincount(a.flat, minlength=depth) => fast (~0.03s for
# a 2048x2048 array) but only works on flat array with uint8 and uint16 and
# creates 2**16 bins if uint16 (so need to do a reshape and sum on top of it)
# * numpy.histogram(a, bins=256, range=(0,depth)) => slow (~0.09s for a
# 2048x2048 array) but works exactly as needed directly in every case.
# * see weave? (~ 0.01s for 2048x2048 array of uint16) eg:
# timeit.timeit("counts=numpy.zeros((2**16), dtype=numpy.uint32);
# weave.inline( code, ['counts', 'idxa'])", "import numpy;from scipy import weave; code=r\"for (int i=0; i<Nidxa[0]; i++) { COUNTS1( IDXA1(i)>>8)++; }\"; idxa=numpy.ones((2048*2048), dtype=numpy.uint16)+15", number=100)
# * see cython?
# for comparison, a.min() + a.max() are 0.01s for 2048x2048 array
def histogram(data, irange=None):
"""
Compute the histogram of the given image.
data (numpy.ndarray of numbers): greyscale image
irange (None or tuple of 2 unsigned int): min/max values to be found
in the data. None => auto (min, max will be detected from the data)
return hist, edges:
hist (ndarray 1D of 0<=int): number of pixels with the given value
Note that the length of the returned histogram is not fixed. If irange
is defined and data is integer, the length is always equal to
irange[1] - irange[0] + 1.
edges (tuple of numbers): lowest and highest bound of the histogram.
edges[1] is included in the bin. If irange is defined, it's the same
values.
"""
if irange is None:
if data.dtype.kind in "biu":
idt = numpy.iinfo(data.dtype)
irange = (idt.min, idt.max)
if data.itemsize > 2:
# range is too big to be used as is => look really at the data
irange = (int(data.view(numpy.ndarray).min()),
int(data.view(numpy.ndarray).max()))
else:
# cast to ndarray to ensure a scalar (instead of a DataArray)
irange = (data.view(numpy.ndarray).min(), data.view(numpy.ndarray).max())
# short-cuts (for the most usual types)
if data.dtype.kind in "bu" and irange[0] == 0 and data.itemsize <= 2 and len(data) > 0:
# TODO: for int (irange[0] < 0), treat as unsigned, and swap the first
# and second halves of the histogram.
# TODO: for 32 or 64 bits with full range, convert to a view looking
# only at the 2 high bytes.
length = irange[1] - irange[0] + 1
hist = numpy.bincount(data.flat, minlength=length)
edges = (0, hist.size - 1)
if edges[1] > irange[1]:
logging.warning("Unexpected value %d outside of range %s", edges[1], irange)
else:
if data.dtype.kind in "biu":
length = min(8192, irange[1] - irange[0] + 1)
else:
# For floats, it will automatically find the minimum and maximum
length = 256
hist, all_edges = numpy.histogram(data, bins=length, range=irange)
edges = (max(irange[0], all_edges[0]),
min(irange[1], all_edges[-1]))
return hist, edges
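# Illustrative sketch (hypothetical 8-bit data): for small unsigned integer
# types there is one bin per possible value within irange, and the edges are
# the bounds of that range.
#   >>> data = numpy.array([[0, 1], [1, 3]], dtype=numpy.uint8)
#   >>> hist, edges = histogram(data, irange=(0, 3))
#   >>> hist.tolist(), edges   # -> ([1, 2, 0, 1], (0, 3))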
def guessDRange(data):
"""
Guess the data range of the data given.
data (None or DataArray): data on which to base the guess
return (2 values)
"""
if data.dtype.kind in "biu":
try:
depth = 2 ** data.metadata[model.MD_BPP]
if depth <= 1:
logging.warning("Data reports a BPP of %d",
data.metadata[model.MD_BPP])
raise ValueError() # fall back to data type
if data.dtype.kind == "i":
drange = (-depth // 2, depth // 2 - 1)
else:
drange = (0, depth - 1)
except (KeyError, ValueError):
idt = numpy.iinfo(data.dtype)
drange = (idt.min, idt.max)
else:
raise TypeError("Cannot guess drange for data of kind %s" % data.dtype.kind)
return drange
def isClipping(data, drange=None):
"""
Check whether the given image has clipping pixels. Clipping is detected
by checking if a pixel value is the maximum value possible.
data (numpy.ndarray): image to check
drange (None or tuple of 2 values): min/max possible values contained.
If None, it will try to guess it.
return (bool): True if there are some clipping pixels
"""
if drange is None:
drange = guessDRange(data)
return drange[1] in data
# TODO: try to do cumulative histogram value mapping (=histogram equalization)?
# => might improve the greys, but might be "too" clever
def DataArray2RGB(data, irange=None, tint=(255, 255, 255)):
"""
:param data: (numpy.ndarray of unsigned int) 2D image greyscale (unsigned
float might work as well)
:param irange: (None or tuple of 2 values) min/max intensities mapped
to black/white
None => auto (min, max are from the data);
0, max val of data => whole range is mapped.
min must be < max, and must be of the same type as data.dtype.
:param tint: Could be:
- (3-tuple of 0 < int <256) RGB colour of the final image (each
pixel is multiplied by the value. Default is white.
- colors.Colormap Object
:return: (numpy.ndarray of 3*shape of uint8) converted image in RGB with the
same dimension
"""
# TODO: handle signed values
assert(data.ndim == 2) # => 2D with greyscale
# Discard the DataArray aspect and just get the raw array, to be sure we
# don't get a DataArray as result of the numpy operations
data = data.view(numpy.ndarray)
# fit it to 8 bits and update brightness and contrast at the same time
if irange is None:
irange = (numpy.nanmin(data), numpy.nanmax(data))
if math.isnan(irange[0]):
logging.warning("Trying to convert all-NaN data to RGB")
data = numpy.nan_to_num(data)
irange = (0, 1)
else:
# ensure irange is the same type as the data. It ensures we don't get
# crazy values, and also that numpy doesn't get confused in the
# intermediary dtype (cf .clip()).
irange = numpy.array(irange, data.dtype)
# TODO: warn if irange looks too different from original value?
if irange[0] == irange[1]:
logging.info("Requested RGB conversion with null-range %s", irange)
# Determine if it is necessary to deal with the color map
# Otherwise, continue with the old method
if isinstance(tint, colors.Colormap):
# Normalize the data to the interval [0, 1.0]
# TODO: Add logarithmic normalization with LogNorm
# norm = colors.LogNorm(vmin=data.min(), vmax=data.max())
norm = colors.Normalize(vmin=irange[0], vmax=irange[1], clip=True)
rgb = tint(norm(data)) # returns an rgba array
rgb = rgb[:, :, :3] # discard alpha channel
out = numpy.empty(rgb.shape, dtype=numpy.uint8)
numpy.multiply(rgb, 255, casting='unsafe', out=out)
return out
if data.dtype == numpy.uint8 and irange[0] == 0 and irange[1] == 255:
# short-cut when data is already the same type
# logging.debug("Applying direct range mapping to RGB")
drescaled = data
# TODO: also write short-cut for 16 bits by reading only the high byte?
else:
# If data might go outside of the range, clip first
if data.dtype.kind in "iu":
# no need to clip if irange is the whole possible range
idt = numpy.iinfo(data.dtype)
# Ensure B&W if there is only one value allowed
if irange[0] >= irange[1]:
if irange[0] > idt.min:
irange = (irange[0] - 1, irange[0])
else:
irange = (irange[0], irange[0] + 1)
if img_fast:
try:
# only (currently) supports uint16
return img_fast.DataArray2RGB(data, irange, tint)
except ValueError as exp:
logging.info("Fast conversion cannot run: %s", exp)
except Exception:
logging.exception("Failed to use the fast conversion")
if irange[0] > idt.min or irange[1] < idt.max:
data = data.clip(*irange)
else: # floats et al. => always clip
# Ensure B&W if there is just one value allowed
if irange[0] >= irange[1]:
irange = (irange[0] - 1e-9, irange[0])
data = data.clip(*irange)
dshift = data - irange[0]
if data.dtype == numpy.uint8:
drescaled = dshift # re-use memory for the result
else:
# TODO: could directly use one channel of the 'rgb' variable?
drescaled = numpy.empty(data.shape, dtype=numpy.uint8)
# Ideally, it would be 255 / (irange[1] - irange[0]) + 0.5, but to avoid
# the addition, we can just use 255.99, and with the rounding down, it's
# very similar.
b = 255.99 / (irange[1] - irange[0])
numpy.multiply(dshift, b, out=drescaled, casting="unsafe")
# Now duplicate it 3 times to make it RGB (as a simple approximation of
# greyscale)
# dstack doesn't work because it doesn't generate in C order (uses strides)
# apparently this is as fast (or even a bit better):
# 0 copy (1 malloc)
rgb = numpy.empty(data.shape + (3,), dtype=numpy.uint8, order='C')
# Tint (colouration)
if tint == (255, 255, 255):
# fast path when no tint
# Note: it seems numpy.repeat() is 10x slower ?!
# a = numpy.repeat(drescaled, 3)
# a.shape = data.shape + (3,)
rgb[:, :, 0] = drescaled # 1 copy
rgb[:, :, 1] = drescaled # 1 copy
rgb[:, :, 2] = drescaled # 1 copy
else:
rtint, gtint, btint = tint
# multiply by a float, cast back to type of out, and put into out array
# TODO: multiplying by float(x/255) is the same as multiplying by int(x)
# and >> 8
numpy.multiply(drescaled, rtint / 255, out=rgb[:, :, 0], casting="unsafe")
numpy.multiply(drescaled, gtint / 255, out=rgb[:, :, 1], casting="unsafe")
numpy.multiply(drescaled, btint / 255, out=rgb[:, :, 2], casting="unsafe")
return rgb
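# Illustrative sketch (hypothetical 16-bit data): values at the low end of
# irange map to black and values at the high end map to white; a tint or a
# Colormap changes the colour instead of the grey level.
#   >>> grey = numpy.array([[0, 500], [1000, 1000]], dtype=numpy.uint16)
#   >>> rgb = DataArray2RGB(grey, irange=(0, 1000))
#   >>> rgb.shape, rgb.dtype   # -> ((2, 2, 3), dtype('uint8'))
#   >>> rgb[0, 0].tolist(), rgb[1, 1].tolist()   # -> ([0, 0, 0], [255, 255, 255])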
def getColorbar(color_map, width, height, alpha=False):
"""
Returns an RGB gradient rectangle or colorbar (as numpy array with 2 dim of RGB tuples)
based on the color map inputed
color_map: (matplotlib colormap object)
width (int): pixel width of output rectangle
height (int): pixel height of output rectangle
alpha: (bool): set to true if you want alpha channel
return: numpy Array of uint8 RGB tuples
"""
assert isinstance(width, int) and width > 0
assert isinstance(height, int) and height > 0
gradient = numpy.linspace(0.0, 1.0, width)
gradient = numpy.tile(gradient, (height, 1))
gradient = color_map(gradient)
if not alpha:
gradient = gradient[:, :, :3] # discard alpha channel # convert to rgb
gradient = numpy.multiply(gradient, 255) # convert to rgb
return gradient.astype(numpy.uint8)
def tintToColormap(tint):
"""
Convert a tint to a matplotlib.colors.Colormap object
tint argument can be:
- a list tuple RGB value (for a tint) or
- a matplotlib.colors.Colormap object for a custom color map (then it is just returned as is) or
- a string of value TINT_FIT_TO_RGB to indicate fit RGB color mapping
- a string of value TINT_RGB_AS_IS that indicates no tint. Will be converted to a rainbow colormap
name (string): the name argument of the new colormap object
returns matplotlib.colors.Colormap object
"""
if isinstance(tint, colors.Colormap):
return tint
elif isinstance(tint, tuple) or isinstance(tint, list): # a tint RGB value
# make a gradient from black to the selected tint
tint = colors.LinearSegmentedColormap.from_list("",
[(0, 0, 0), rgb_to_frgb(tint)])
elif tint == TINT_RGB_AS_IS:
tint = cm.get_cmap('hsv')
elif tint == TINT_FIT_TO_RGB: # tint Fit to RGB constant
tint = colors.ListedColormap([(0, 0, 1), (0, 1, 0), (1, 0, 0)], 'Fit to RGB')
else:
raise TypeError("Invalid tint type: %s" % (tint,))
return tint
def getYXFromZYX(data, zIndex=0):
"""
Extracts an XY plane from a ZYX image at the index given by zIndex (int)
Returns the data array, which is now 2D. The metadata of the resulting 2D
image is updated such that MD_POS reflects the position of the 3D slice.
data: an image DataArray typically with 3 dimensions
zIndex: the index of the XY plane to extract from the image.
returns: 2D image DataArray
"""
d = data.view()
if d.ndim < 2:
d.shape = (1,) * (2 - d.ndim) + d.shape
elif d.ndim > 2:
d.shape = d.shape[-3:] # raise ValueError if it will not work
d = d[zIndex] # Remove z
# Handle updating metadata
pxs = d.metadata.get(model.MD_PIXEL_SIZE)
pos = d.metadata.get(model.MD_POS)
if pxs is not None and pos is not None and len(pxs) == 3:
height = d.shape[0] * pxs[2] # ZYX order
if len(pos) == 3:
d.metadata[model.MD_POS] = (pos[0], pos[1], pos[2] - height / 2 + zIndex * pxs[2])
else:
logging.warning("Centre Position metadata missing third dimension. Assuming 0.")
d.metadata[model.MD_POS] = (pos[0], pos[1], -height / 2 + zIndex * pxs[2])
return d
def ensure2DImage(data):
"""
Reshape data to make sure it's 2D by trimming all the low dimensions (=1).
Odemis' convention is to have data organized as CTZYX. If CTZ=111, then it's
a 2D image, but it has too many dimensions for functions which want only 2D.
If it has a 3D pixel size (voxels) then it must be ZYX, so this should be handled.
data (DataArray): the data to reshape
return DataArray: view to the same data but with 2D shape
raise ValueError: if the data is not 2D (CTZ != 111)
"""
d = data.view()
if d.ndim < 2:
d.shape = (1,) * (2 - d.ndim) + d.shape
elif d.ndim > 2:
d.shape = d.shape[-2:] # raise ValueError if it will not work
return d
def RGB2Greyscale(data):
"""
Converts an RGB image to a greyscale image.
Note: it currently adds the 3 channels together, but this should not be
assumed to hold true.
data (ndarray of YX3 uint8): RGB image (alpha channel can be on the 4th channel)
returns (ndarray of YX uint16): a greyscale representation.
"""
if data.shape[-1] not in {3, 4}:
raise ValueError("Data passed has %d colour channels, which is not RGB" %
(data.shape[-1],))
if data.dtype != numpy.uint8:
logging.warning("RGB data should be uint8, but is %s type", data.dtype)
imgs = data[:, :, 0].astype(numpy.uint16)
imgs += data[:, :, 1]
imgs += data[:, :, 2]
return imgs
def ensureYXC(data):
"""
Ensure that a RGB image is in YXC order in memory, to fit RGB24 or RGB32
format.
data (DataArray): 3 dimensions RGB data
return (DataArray): same data, if necessary reordered in YXC order
"""
if data.ndim != 3:
raise ValueError("data has not 3 dimensions (%d dimensions)" % data.ndim)
md = data.metadata.copy()
dims = md.get(model.MD_DIMS, "CYX")
if dims == "CYX":
# CYX, change it to YXC, by rotating axes
data = numpy.rollaxis(data, 2) # XCY
data = numpy.rollaxis(data, 2) # YXC
dims = "YXC"
if not dims == "YXC":
raise NotImplementedError("Don't know how to handle dim order %s" % (dims,))
if data.shape[-1] not in {3, 4}:
logging.warning("RGB data has C dimension of length %d, instead of 3 or 4", data.shape[-1])
if data.dtype != numpy.uint8:
logging.warning("RGB data should be uint8, but is %s type", data.dtype)
data = numpy.ascontiguousarray(data) # force memory placement
md[model.MD_DIMS] = dims
return model.DataArray(data, md)
def rescale_hq(data, shape):
"""
Resize the image to the new given shape (smaller or bigger). It tries to
smooth the pixels. Metadata is updated.
data (DataArray or numpy.array): Data to be rescaled
shape (tuple): the new shape of the image. It needs to be the same length as the data.shape.
return (DataArray or numpy.array): The image rescaled. It has the same shape
as the 'shape' parameter. The returned object has the same type of the 'data' parameter
"""
if 0 in shape:
raise ValueError("Requested shape is %s, but it should be at least 1 px in each dimension" % (shape,))
scale = tuple(n / o for o, n in zip(data.shape, shape))
if hasattr(data, "metadata"):
dims = data.metadata.get(model.MD_DIMS, "CTZYX"[-data.ndim::])
ci = dims.find("C") # -1 if not found
else:
ci = -1
if data.ndim == 2 or (data.ndim == 3 and ci == 2 and scale[ci] == 1):
# TODO: if C is not last dim, reshape (ie, call ensureYXC())
# TODO: not all dtypes are supported by OpenCV (eg, uint32)
# This is a normal spatial image
if any(s < 1 for s in scale):
interpolation = cv2.INTER_AREA # Gives best looking when shrinking
else:
interpolation = cv2.INTER_LINEAR
# If a 3rd dim, OpenCV will apply the resize on each C independently
out = cv2.resize(data, (shape[1], shape[0]), interpolation=interpolation)
else:
# Weird number of dimensions => default to the less pretty but more
# generic scipy version
out = numpy.empty(shape, dtype=data.dtype)
scipy.ndimage.interpolation.zoom(data, zoom=scale, output=out, order=1, prefilter=False)
# Update the metadata
if hasattr(data, "metadata"):
out = model.DataArray(out, dict(data.metadata))
# update each metadata which is linked to the pixel size
# Metadata that needs to be divided by the scale (zoom => decrease)
for k in {model.MD_PIXEL_SIZE, model.MD_BINNING}:
try:
ov = data.metadata[k]
except KeyError:
continue
try:
out.metadata[k] = tuple(o / s for o, s in zip(ov, scale))
except Exception:
logging.exception("Failed to update metadata '%s' when rescaling by %s",
k, scale)
# Metadata that needs to be multiplied by the scale (zoom => increase)
for k in {model.MD_AR_POLE}:
try:
ov = data.metadata[k]
except KeyError:
continue
try:
out.metadata[k] = tuple(o * s for o, s in zip(ov, scale))
except Exception:
logging.exception("Failed to update metadata '%s' when rescaling by %s",
k, scale)
return out
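# Illustrative sketch (hypothetical 4x4 image shrunk to 2x2): the pixel size
# metadata is scaled up so the physical extent of the image stays the same.
#   >>> da = model.DataArray(numpy.zeros((4, 4), dtype=numpy.uint8),
#   ...                      {model.MD_PIXEL_SIZE: (1e-6, 1e-6)})
#   >>> small = rescale_hq(da, (2, 2))
#   >>> small.shape, small.metadata[model.MD_PIXEL_SIZE]   # -> ((2, 2), (2e-06, 2e-06))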
def Subtract(a, b):
"""
Subtract 2 images, with clipping if needed
a (DataArray)
b (DataArray or scalar)
return (DataArray): a - b, with same dtype and metadata as a
"""
# TODO: see if it is more useful to upgrade the type to a bigger if overflow
if a.dtype.kind in "bu":
# avoid underflow so that 1 - 2 = 0 (and not 65536)
return numpy.maximum(a, b) - b
else:
# TODO handle under/over-flows with integer types (127 - (-1) => -128)
return a - b
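# Illustrative sketch (hypothetical unsigned data): the subtraction clips at 0
# instead of wrapping around.
#   >>> a = numpy.array([1, 5], dtype=numpy.uint16)
#   >>> Subtract(a, 2).tolist()   # -> [0, 3]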
def Bin(data, binning):
"""
Combines adjacent pixels together, by summing them, in a similar way that
it's done on a CCD.
data (DataArray of shape YX): the data to bin. The dimensions should be
multiple of the binning.
binning (1<=int, 1<=int): binning in X and Y
return (DataArray of shape Y'X', with the same dtype as data): all cluster of
pixels of binning are summed into a single pixel. If it goes above the maximum
value, it's clipped to this maximum value.
The metadata PIXEL_SIZE and BINNING are updated (multiplied by the binning).
If data has MD_BASELINE (the average minimum value), the entire data will
be subtracted so that MD_BASELINE is kept. In other words,
baseline * (Bx*By - 1) is subtracted. If it would lead to negative value,
then the data is clipped to 0 and MD_BASELINE adjusted (increased).
"""
assert data.ndim == 2
orig_dtype = data.dtype
orig_shape = data.shape
if binning[0] < 1 or binning[1] < 1:
raise ValueError("Binning must be > 0, but got %s" % (binning,))
# Reshape the data to go from YX to Y'ByX'Bx, so that we can sum on By and Bx
new_shape = orig_shape[0] // binning[1], orig_shape[1] // binning[0]
if (new_shape[0] * binning[1], new_shape[1] * binning[0]) != orig_shape:
raise ValueError("Data shape %s not multiple of binning %s" % (orig_shape, new_shape))
data = data.reshape(new_shape[0], binning[1], new_shape[1], binning[0])
data = numpy.sum(data, axis=(1, 3)) # uint64 (if data.dtype is int)
assert data.shape == new_shape
orig_bin = data.metadata.get(model.MD_BINNING, (1, 1))
data.metadata[model.MD_BINNING] = orig_bin[0] * binning[0], orig_bin[1] * binning[1]
if model.MD_PIXEL_SIZE in data.metadata:
pxs = data.metadata[model.MD_PIXEL_SIZE]
data.metadata[model.MD_PIXEL_SIZE] = pxs[0] * binning[0], pxs[1] * binning[1]
# Subtract baseline (aka black level) to avoid it from being multiplied,
# so instead of having "Sum(data) + Sum(bl)", we have "Sum(data) + bl".
try:
baseline = data.metadata[model.MD_BASELINE]
baseline_sum = binning[0] * binning[1] * baseline
# If the baseline is too high compared to the actual black, we
# could end up subtracting too much, and values would underflow
# => be extra careful and never subtract more than min value.
minv = float(data.min())
extra_bl = baseline_sum - baseline
if extra_bl > minv:
extra_bl = minv
logging.info("Baseline reported at %d * %d, but lower values found, so only subtracting %d",
baseline, orig_shape[0], extra_bl)
# Same as "data -= extra_bl", but also works if extra_bl < 0
numpy.subtract(data, extra_bl, out=data, casting="unsafe")
data.metadata[model.MD_BASELINE] = baseline_sum - extra_bl
except KeyError:
pass
# If int, revert to original type, with data clipped (not overflowing)
if orig_dtype.kind in "biu":
idtype = numpy.iinfo(orig_dtype)
data = data.clip(idtype.min, idtype.max).astype(orig_dtype)
return data
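# Illustrative sketch (hypothetical 2x2 image binned to 1x1): the four pixels
# are summed into one, and the PIXEL_SIZE/BINNING metadata are scaled
# accordingly.
#   >>> da = model.DataArray(numpy.array([[1, 2], [3, 4]], dtype=numpy.uint16),
#   ...                      {model.MD_PIXEL_SIZE: (1e-6, 1e-6)})
#   >>> binned = Bin(da, (2, 2))
#   >>> binned.tolist(), binned.metadata[model.MD_PIXEL_SIZE]
#   # -> ([[10]], (2e-06, 2e-06))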
# TODO: use VIPS to be fast?
def Average(images, rect, mpp, merge=0.5):
"""
mix the given images into a big image so that each pixel is the average of the
corresponding pixels of all the images (separate operation for each colour channel).
images (list of RGB DataArrays)
merge (0<=float<=1): merge ratio of the first and second image (IOW: the
first image is weighted by merge and second image by (1-merge))
"""
# TODO: is ok to have a image = None?
# TODO: (once the operator callable is clearly defined)
raise NotImplementedError()
# TODO: add operator Screen
def mergeMetadata(current, correction=None):
"""
Applies the correction metadata to the current metadata.
This function is used in order to apply the correction metadata
generated by the overlay stream to the optical images.
In case there is some correction metadata (i.e. MD_*_COR) in the current
dict this is updated with the corresponding metadata found in correction
dict. However, if this particular metadata is not present in correction dict
while it exists in current dict, it remains as is and its current value is
used e.g. in fine alignment for Delphi, MD_ROTATION_COR of the SEM image is
already present in the current metadata to compensate for MD_ROTATION, thus
it is omitted in the correction metadata returned by the overlay stream.
current (dict): original metadata, it will be updated, with the *_COR
metadata removed if it was present.
correction (dict or None): metadata with correction information, if None,
will use current to find the correction metadata.
"""
if correction is not None:
current.update(correction)
# TODO: rotation and position correction should use addition, not subtraction
if model.MD_ROTATION_COR in current:
# Default rotation is 0 rad if not specified
rotation_cor = current[model.MD_ROTATION_COR]
rotation = current.get(model.MD_ROTATION, 0)
current[model.MD_ROTATION] = (rotation - rotation_cor) % (math.pi * 2)
if model.MD_POS_COR in current:
# Default position is (0, 0) if not specified
position_cor = current[model.MD_POS_COR]
position = current.get(model.MD_POS, (0, 0))
current[model.MD_POS] = (position[0] - position_cor[0],
position[1] - position_cor[1])
if model.MD_SHEAR_COR in current:
# Default shear is 0 if not specified
shear_cor = current[model.MD_SHEAR_COR]
shear = current.get(model.MD_SHEAR, 0)
current[model.MD_SHEAR] = shear - shear_cor
# There is no default pixel size (though in some case sensor pixel size can
# be used as a fallback)
if model.MD_PIXEL_SIZE in current:
pxs = current[model.MD_PIXEL_SIZE]
# Do the correction for 2D and 3D
pxs_cor = current.get(model.MD_PIXEL_SIZE_COR, (1,) * len(pxs))
current[model.MD_PIXEL_SIZE] = tuple(p * pc for p, pc in zip(pxs, pxs_cor))
elif model.MD_PIXEL_SIZE_COR in current:
logging.info("Cannot correct pixel size of data with unknown pixel size")
# remove correction metadata (to make it clear the correction has been applied)
for k in (model.MD_ROTATION_COR, model.MD_PIXEL_SIZE_COR, model.MD_POS_COR, model.MD_SHEAR_COR):
if k in current:
del current[k]
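# Illustrative sketch (hypothetical correction): the *_COR entries are folded
# into the plain metadata in place and then removed.
#   >>> md = {model.MD_ROTATION: 0.1, model.MD_ROTATION_COR: 0.1,
#   ...       model.MD_PIXEL_SIZE: (1e-6, 1e-6), model.MD_PIXEL_SIZE_COR: (2, 2)}
#   >>> mergeMetadata(md)
#   >>> md[model.MD_ROTATION], md[model.MD_PIXEL_SIZE]   # -> (0.0, (2e-06, 2e-06))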
def getTilesSize(tiles):
"""
Get the size in pixels of the image formed by the tiles
tiles (tuple of tuple of DataArray): Tiles
return (h, w): The size in pixels of the image formed by the tiles
"""
# calculates the height of the image, summing the heights of the tiles of the first column
height = 0
for tile in tiles[0]:
height += tile.shape[0]
# calculates the width of the image, summing the width of the tiles of the first row
width = 0
for tiles_column in tiles:
width += tiles_column[0].shape[1]
return height, width
def getCenterOfTiles(tiles, result_shape):
""" Calculates the center of the result image
It is based on the formula for calculating the position of a pixel in world coordinates:
CT = CI + TMAT * DC
where:
CT: center of the tile in world coordinates
CI: center of the image in world coordinates
DC: delta of the centers in pixel coordinates
TMAT: transformation matrix
From the formula above, comes the following formula:
CI = CT - TMAT * DC,
which is used below
tiles (tuple of tuple of DataArray): Tiles
result_shape (height, width): Size in pixels of the result image from the tiles
return (x, y): Physical coordinates of the center of the image
"""
first_tile = tiles[0][0]
ft_md = first_tile.metadata
dims = ft_md.get(model.MD_DIMS, "CTZYX"[-first_tile.ndim::])
ft_shape = [first_tile.shape[dims.index('X')], first_tile.shape[dims.index('Y')]]
# center of the tile in pixel coordinates
center_tile_pixel = [d / 2 for d in ft_shape]
# center of the image in pixel coordinates
center_image_pixel = [d / 2 for d in result_shape[::-1]]
# distance between the center of the tile and the center of the image, in pixel coordinates
dist_centers_tile_pixels = [ct - ci for ct, ci in zip(center_tile_pixel, center_image_pixel)]
# converts the centers distance, so this variable can be multiplied by the transformation matrix
dist_centers_tile_pixels = numpy.matrix(dist_centers_tile_pixels).getT()
# transformation matrix
tmat = get_img_transformation_matrix(first_tile.metadata)
# distance of the centers converted to world coordinates
dist_centers_w = tmat * dist_centers_tile_pixels
# convert the variable from a numpy.matrix to a numpy.array
dist_centers_w = numpy.ravel(dist_centers_w)
# center of the tile in world coordinates
center_tile_w = first_tile.metadata[model.MD_POS]
# center of the image in world coordinates
image_pos = center_tile_w - dist_centers_w
return tuple(image_pos)
def mergeTiles(tiles):
    """
Merge tiles into one DataArray
tiles (tuple of tuple of DataArray): Tiles to be merged
return (DataArray): Merge of all the tiles
"""
first_tile = tiles[0][0]
ft_md = first_tile.metadata
result_shape = getTilesSize(tiles)
# TODO must work when the channel dimension is not the last
if first_tile.ndim == 3:
result_shape = result_shape + (first_tile.shape[2],)
result = numpy.empty(result_shape, dtype=first_tile.dtype)
result = model.DataArray(result, ft_md.copy())
width_sum = 0
# copy the tiles to the result image
for tiles_column in tiles:
tile_width = tiles_column[0].shape[1]
height_sum = 0
for tile in tiles_column:
tile_height = tile.shape[0]
bottom = height_sum + tile_height
right = width_sum + tile_width
result[height_sum:bottom, width_sum:right] = tile
height_sum += tile_height
width_sum += tile_width
result.metadata[model.MD_POS] = getCenterOfTiles(tiles, result_shape[:2])
return result
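# Illustrative sketch (assumption: each tile is a model.DataArray whose
# metadata carries at least MD_POS and MD_PIXEL_SIZE, which
# get_img_transformation_matrix() relies on):
#
#   md = {model.MD_POS: (0, 0), model.MD_PIXEL_SIZE: (1e-6, 1e-6)}
#   tile = model.DataArray(numpy.zeros((4, 3), dtype=numpy.uint8), md)
#   big = mergeTiles(((tile, tile), (tile, tile)))  # 8x6 merged DataArray
#   # big.metadata[model.MD_POS] is recomputed to the centre of the mosaic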
def getBoundingBox(content):
"""
Compute the physical bounding-box of the given DataArray(Shadow)
content (DataArray(Shadow)): The data of the image
return (tuple(minx, miny, maxx, maxy)): left,top,right,bottom positions in world coordinates where top < bottom and left < right
raise LookupError if metadata is not available
"""
# TODO: also handle if passed a 2D array of images? (as returned for pyramidal images)
md = content.metadata.copy()
mergeMetadata(md) # apply the corrections
# get the pixel size of the full image
try:
pxs = md[model.MD_PIXEL_SIZE]
except KeyError:
raise LookupError("Cannot compute physical coordinates without MD_PIXEL_SIZE")
if None in pxs:
        # Some detectors set it to None when the dimensions are not known
raise LookupError("Pixel size %s is not proper meters" % (pxs,))
dims = md.get(model.MD_DIMS, "CTZYX"[-content.ndim::])
img_shape = (content.shape[dims.index('X')], content.shape[dims.index('Y')])
# half shape on world coordinates
half_shape_wc = (img_shape[0] * pxs[0] / 2, img_shape[1] * pxs[1] / 2)
md_pos = md.get(model.MD_POS, (0.0, 0.0)) # center
rect = (
md_pos[0] - half_shape_wc[0],
md_pos[1] - half_shape_wc[1],
md_pos[0] + half_shape_wc[0],
md_pos[1] + half_shape_wc[1],
)
# TODO: if MD_SHEAR or MD_ROTATION => need more
# Compute the location of all the 4 corners, and then pick the bounding box of them
return rect
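# Illustrative sketch (not part of the original module): a 100x200 (YX) image
# with 1 um pixels centred on the origin spans +/-100 um in X and +/-50 um in Y.
def _example_bounding_box():
    da = model.DataArray(numpy.zeros((100, 200), dtype=numpy.uint8),
                         {model.MD_PIXEL_SIZE: (1e-6, 1e-6)})
    return getBoundingBox(da)  # -> (-100e-6, -50e-6, 100e-6, 50e-6)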
class ImageIntegrator(object):
"""
    Integrate the images one after another. Once the first image is acquired,
    pick the best data type to accumulate the images into, in order to avoid
    saturation and overflow. At the end of the acquisition, take the average of
    the integrated data if the detector is DT_NORMAL, and subtract the baseline
    from the final integrated image.
"""
def __init__(self, steps):
"""
steps: (int) the total number of images that need to be integrated
"""
self.steps = steps # can be changed by the caller, on the fly
self._step = 0
self._img = None
self._best_dtype = None
def append(self, img):
"""
        Integrate the newly acquired image with the previously integrated one
        (if any) and return the new integrated image. The internal ._img is
        reset after reaching the number of integration counts, signalling that
        the integration of the acquired images is completed.
        Args:
            img (model.DataArray): the image to integrate with the previous
                (integrated) one, if any
Returns:
img(model.DataArray): the integrated image with the updated metadata
"""
self._step += 1
if self._img is None:
orig_dtype = img.dtype
self._best_dtype = get_best_dtype_for_acc(orig_dtype, self.steps)
integ_img = img
self._img = integ_img
else:
integ_img = self._img
# The sum starts as a duplicate of the first image, on the second image received
if self._step == 2:
data = integ_img.astype(self._best_dtype, copy=True)
integ_img = model.DataArray(data, integ_img.metadata.copy())
numpy.add(integ_img, img, out=integ_img)
# update the metadata of the integrated image in every integration step
md = integ_img.metadata
self.add_integration_metadata(md, img.metadata)
# At the end of the acquisition, check if the detector type is DT_NORMAL and then take the average by
# dividing with the number of acquired images (integration count) for every pixel position and restoring
# the original dtype.
if self._step == self.steps:
det_type = md.get(model.MD_DET_TYPE, model.MD_DT_INTEGRATING)
if det_type == model.MD_DT_NORMAL: # SEM
orig_dtype = img.dtype
if orig_dtype.kind in "biu":
integ_img = numpy.floor_divide(integ_img, self._step, dtype=orig_dtype, casting='unsafe')
else:
integ_img = numpy.true_divide(integ_img, self._step, dtype=orig_dtype, casting='unsafe')
elif det_type != model.MD_DT_INTEGRATING: # not optical either
logging.warning("Unknown detector type %s for image integration.", det_type)
            # The baseline, if it exists, should also be subtracted from the integrated image.
if model.MD_BASELINE in md:
integ_img, md = self.subtract_baseline(integ_img, md)
integ_img = model.DataArray(integ_img, md)
self._img = integ_img
# reset the ._img and ._step once you reach the integration count
if self._step >= self.steps:
self._step = 0
self._img = None
return integ_img
def add_integration_metadata(self, mda, mdb):
"""
add mdb to mda, and update mda with the result
returns dict: mda, which has been updated
"""
        if model.MD_DWELL_TIME in mda:
mda[model.MD_DWELL_TIME] += mdb.get(model.MD_DWELL_TIME, 0)
        if model.MD_EXP_TIME in mda:
mda[model.MD_EXP_TIME] += mdb.get(model.MD_EXP_TIME, 0)
mda[model.MD_INTEGRATION_COUNT] = mda.get(model.MD_INTEGRATION_COUNT, 1) + mdb.get(model.MD_INTEGRATION_COUNT, 1)
return mda
def subtract_baseline(self, data, md):
"""
        Subtract the accumulated baselines from the data so that only a single baseline level remains.
Args:
data: the data after the integration of all images
md: metadata of the integrated image
Returns:
data, md: the updated data and metadata after the subtraction of the baseline
"""
baseline = md[model.MD_BASELINE]
# Subtract the baseline (aka black level) from the final integrated image.
# Remove the baseline from n-1 images, keep one baseline as bg level.
minv = float(data.min())
# If the baseline is too high compared to the actual black, we could end up subtracting too much,
# and values would underflow => be extra careful and never subtract more than the min value.
baseline_sum = self._step * baseline # sum of baselines for n images.
extra_bl = (self._step - 1) * baseline # sum of baselines for n-1 images, keep one baseline as bg level.
# check if we underflow the data values
if extra_bl > minv:
extra_bl = minv
logging.info("Baseline reported at %d * %d, but lower values found, so only subtracting %d",
baseline, self.steps, extra_bl)
# Same as "data -= extra_bl", but also works if extra_bl < 0
numpy.subtract(data, extra_bl, out=data, casting="unsafe")
# replace the metadata of the image
md[model.MD_BASELINE] = baseline_sum - extra_bl
return data, md
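# Illustrative sketch (not part of the original module): accumulate 3 frames
# with an ImageIntegrator; append() returns the running sum and resets itself
# once `steps` images have been received.
def _example_image_integration():
    integrator = ImageIntegrator(3)
    integ = None
    for _ in range(3):
        frame = model.DataArray(numpy.ones((4, 4), dtype=numpy.uint16),
                                {model.MD_DET_TYPE: model.MD_DT_INTEGRATING})
        integ = integrator.append(frame)
    return integ  # every pixel is 3 (the sum of the three frames)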
def assembleZCube(images, zlevels):
"""
Construct xyz cube from a z stack of images
:param images: (list of DataArray of shape YX) list of z ordered images
:param zlevels: (list of float) list of focus positions
:return: (DataArray of shape ZYX) the data array of the xyz cube
"""
    # images may come with extra leading dimensions of size 1 (e.g. 1xYxX), so
    # keep only the last two dims. This will fail on purpose if an image
    # contains real data in more than 2 dimensions.
ret = numpy.array([im.reshape(im.shape[-2:]) for im in images])
# Add back metadata
metadata3d = copy.copy(images[0].metadata)
# Extend pixel size to 3D
ps_x, ps_y = metadata3d[model.MD_PIXEL_SIZE]
ps_z = (zlevels[-1] - zlevels[0]) / (len(zlevels) - 1) if len(zlevels) > 1 else 1e-6
# Compute cube centre
c_x, c_y = metadata3d[model.MD_POS]
c_z = (zlevels[0] + zlevels[-1]) / 2 # Assuming zlevels are ordered
metadata3d[model.MD_POS] = (c_x, c_y, c_z)
# For a negative pixel size, convert to a positive and flip the z axis
if ps_z < 0:
ret = numpy.flipud(ret)
ps_z = -ps_z
metadata3d[model.MD_PIXEL_SIZE] = (ps_x, ps_y, ps_z)
metadata3d[model.MD_DIMS] = "ZYX"
ret = DataArray(ret, metadata3d)
return ret | gpl-2.0 |
arindam1993/PyBioSim | Simulator.py | 1 | 5869 | '''
(c) 2015 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see the LICENSE.txt file included with this software for more information
authors: Arindam Bose ([email protected]), Tucker Balch ([email protected])
'''
import numpy as np
import matplotlib.pyplot as plt
from numpy import *
from World import *
from Agent import Agent, RestrictedAgent
from Obstacle import *
from pylab import *
from Ball import Ball
from LinearAlegebraUtils import distBetween
from PreyBrain import PreyBrain
from PredatorBrain import PredatorBrain
from Team import Team
from SimTime import SimTime
from StatsTracker import StatsTracker
import random
#Called once for initialization
'''
Usage guidelines:
1. Define globals required for the simulation in the __init__ constructor; here we define a bunch of waypoints for the ball.
2. Initialize the globals in the setup() method.
'''
class Simulator(object):
def __init__(self, world, simTime, fps, imageDirName):
self.world = world
self.simTime = simTime
self.fps = fps
self.imageDirName = imageDirName
self.currWP = 0
self.ballWPs = [array([50.0, -100.0, 0.0]), array([0.0, 100.0, -70.0]), array([50.0, 20.0, 100.0]),array([-30.0, 50.0, -100.0]), array([80.0, -50.0, 50.0]), array([80.0, -50.0, -50.0]), array([-65.0, 20.0, 50.0]), array([-50.0, 20.0, -60.0])]
def setup(self):
#setup directory to save the images
try:
os.mkdir(self.imageDirName)
except:
print self.imageDirName + " subdirectory already exists. OK."
#define teams which the agents can be a part of
predator = Team("Predator", '#ff99ff')
prey = Team("Prey", '#ffcc99')
#Defining a couple of agents
#predator and prey counts
predatorCount = 5
preyCount = 10
displacement = array([0, 20, 0])
#initial seed positions
predatorPos = array([20, 0, 0])
preyPos = array([0, 0, 0])
#set seed for randomly placing predators
random.seed(20)
#initialize predators
for i in range(0, predatorCount):
brain = PredatorBrain()
x = random.random() * 30
y = random.random() * 30
z = random.random() * 30
newDisplacement = array([x, y, z])
agent = Agent(predator, predatorPos, array([0, 0, 0]), brain, 5, 5, 5)
self.world.addAgent(agent)
predatorPos+=newDisplacement
#initialize prey
for i in range(0, preyCount):
brain = PreyBrain()
agent = RestrictedAgent(prey, preyPos, array([0, 0, 0]), brain, 2, 200, 2, 2)
self.world.addAgent(agent)
preyPos+=displacement
#
#define a bunch of obstacles
ob1Pos = array([-50,-50,-50])
ob1 = Obstacle(ob1Pos, 30)
ob2Pos = array([80,-50,-50])
ob2 = Obstacle(ob2Pos, 20)
originRef = Obstacle(array([0.1, 0.1, 0.1]), 10)
#add obstacles to the world
self.world.addObstacle(ob1)
self.world.addObstacle(originRef)
#called at a fixed 30fps always
def fixedLoop(self):
for agent in self.world.agents:
agent.moveAgent(self.world)
for ball in self.world.balls:
if len(self.ballWPs) > 0:
ball.moveBall(self.ballWPs[self.currWP], 1)
if distBetween(ball.position, self.ballWPs[self.currWP]) < 0.5:
self.currWP = (self.currWP + 1)%len(self.ballWPs)
# if len(self.ballWPs) > 0:
# self.ballWPs.remove(self.ballWPs[0])
#Called at specifed fps
def loop(self, ax):
self.world.draw(ax)
def run(self):
#Run setup once
self.setup()
#Setup loop
timeStep = 1/double(30)
frameProb = double(self.fps) / 30
currTime = double(0)
SimTime.fixedDeltaTime = timeStep
SimTime.deltaTime = double(1/ self.fps)
drawIndex = 0
physicsIndex = 0
while(currTime < self.simTime):
self.fixedLoop()
SimTime.time = currTime
currProb = double(drawIndex)/double(physicsIndex+1)
if currProb < frameProb:
self.drawFrame(drawIndex)
drawIndex+=1
physicsIndex+=1
currTime+=double(timeStep)
print "Physics ran for "+str(physicsIndex)+" steps"
print "Drawing ran for "+str(drawIndex)+" steps"
        print "Agents were stunned for " + str(StatsTracker.stunTimeDict)
def drawFrame(self, loopIndex):
fig = plt.figure(figsize=(16,12))
ax = fig.add_subplot(111, projection='3d')
ax.view_init(elev = 30)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
fname = self.imageDirName + '/' + str(int(100000000+loopIndex)) + '.png' # name the file
self.loop(ax)
plt.gca().set_ylim(ax.get_ylim()[::-1])
savefig(fname, format='png', bbox_inches='tight')
print 'Written Frame No.'+ str(loopIndex)+' to '+ fname
plt.close()
#Simulation runs here
#set the size of the world
world = World(150, 150)
#specify which world to simulate, total simulation time, and frammerate for video
sim = Simulator(world, 120, 30, "images")
#run the simulation
sim.run()
'''
To create a video using the image sequence, execute the following command in command line.
>ffmpeg -framerate 30 -i "1%08d.png" -r 30 outPut.mp4
(the -framerate and -r values must match the simulator framerate, 30 fps here)
Make sure to set your current working directory to /images and have ffmpeg in your path.
'''
| bsd-3-clause |
JasonKessler/scattertext | scattertext/Corpus.py | 1 | 3140 | import numpy as np
import pandas as pd
from numpy import nonzero
from scattertext.TermDocMatrix import TermDocMatrix
class Corpus(TermDocMatrix):
def __init__(self,
X,
mX,
y,
term_idx_store,
category_idx_store,
metadata_idx_store,
raw_texts,
unigram_frequency_path=None):
'''
Parameters
----------
X : csr_matrix
term document matrix
mX : csr_matrix
metadata-document matrix
y : np.array
category index array
term_idx_store : IndexStore
Term indices
category_idx_store : IndexStore
            Category indices
metadata_idx_store : IndexStore
Document metadata indices
raw_texts : np.array or pd.Series
Raw texts
unigram_frequency_path : str or None
Path to term frequency file.
'''
TermDocMatrix.__init__(self, X, mX, y,
term_idx_store,
category_idx_store,
metadata_idx_store,
unigram_frequency_path)
self._raw_texts = raw_texts
def get_texts(self):
'''
Returns
-------
pd.Series, all raw documents
'''
return self._raw_texts
def get_doc_indices(self):
return self._y.astype(int)
def search(self, ngram):
'''
Parameters
----------
ngram str or unicode, string to search for
Returns
-------
pd.DataFrame, {'texts': <matching texts>, 'categories': <corresponding categories>}
'''
mask = self._document_index_mask(ngram)
return pd.DataFrame({
'text': self.get_texts()[mask],
'category': [self._category_idx_store.getval(idx)
for idx in self._y[mask]]
})
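    # Illustrative usage sketch (assumes a Corpus built elsewhere, e.g. with
    # scattertext's CorpusFromPandas(...).build()):
    #
    #   hits = corpus.search('the')
    #   # -> DataFrame with a 'text' and a 'category' column, one row per
    #   #    document whose term-document matrix contains that unigram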
def search_index(self, ngram):
"""
Parameters
----------
ngram str or unicode, string to search for
Returns
-------
np.array, list of matching document indices
"""
return nonzero(self._document_index_mask(ngram))[0]
def _document_index_mask(self, ngram):
idx = self._term_idx_store.getidxstrict(ngram.lower())
mask = (self._X[:, idx] > 0).todense().A1
return mask
def _make_new_term_doc_matrix(self,
new_X=None,
new_mX=None,
new_y=None,
new_term_idx_store=None,
new_category_idx_store=None,
new_metadata_idx_store=None,
new_y_mask=None):
X, mX, y = self._update_X_mX_y(new_X, new_mX, new_y, new_y_mask)
return Corpus(X=X,
mX=mX,
y=y,
term_idx_store=new_term_idx_store if new_term_idx_store is not None else self._term_idx_store,
category_idx_store=new_category_idx_store if new_category_idx_store is not None else self._category_idx_store,
metadata_idx_store=new_metadata_idx_store if new_metadata_idx_store is not None else self._metadata_idx_store,
raw_texts=np.array(self.get_texts())[new_y_mask] if new_y_mask is not None else self.get_texts(),
unigram_frequency_path=self._unigram_frequency_path)
| apache-2.0 |
stadelmanma/OpenPNM | OpenPNM/Postprocessing/Plots.py | 2 | 12355 | import scipy as _sp
import matplotlib.pylab as _plt
def profiles(network, fig=None, values=None, bins=[10, 10, 10]):
r"""
Compute the profiles for the property of interest and plots it in all
three dimensions
Parameters
----------
network : OpenPNM Network object
values : array_like
The pore property values to be plotted as a profile
bins : int or list of ints, optional
The number of bins to divide the domain into for averaging.
"""
if fig is None:
fig = _plt.figure()
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
ax = [ax1, ax2, ax3]
xlab = ['x coordinate', 'y_coordinate', 'z_coordinate']
for n in [0, 1, 2]:
n_min, n_max = [_sp.amin(network['pore.coords'][:, n]),
_sp.amax(network['pore.coords'][:, n])]
steps = _sp.linspace(n_min, n_max, bins[n]+1, endpoint=True)
vals = _sp.zeros_like(steps)
for i in range(0, len(steps)-1):
temp = (network['pore.coords'][:, n] > steps[i]) * \
(network['pore.coords'][:, n] <= steps[i+1])
vals[i] = _sp.mean(values[temp])
yaxis = vals[:-1]
xaxis = (steps[:-1] + (steps[1]-steps[0])/2)/n_max
ax[n].plot(xaxis, yaxis, 'bo-')
ax[n].set_xlabel(xlab[n])
ax[n].set_ylabel('Slice Value')
return fig
def porosity_profile(network, fig=None, axis=2):
r"""
Compute and plot the porosity profile in all three dimensions
Parameters
----------
network : OpenPNM Network object
    axis : integer; 0 for x-axis, 1 for y-axis, 2 for z-axis
Notes
-----
the area of the porous medium at any position is calculated from the
maximum pore coordinates in each direction
"""
if fig is None:
fig = _plt.figure()
L_x = _sp.amax(network['pore.coords'][:, 0]) + \
_sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
L_y = _sp.amax(network['pore.coords'][:, 1]) + \
_sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
L_z = _sp.amax(network['pore.coords'][:, 2]) + \
_sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
    if axis == 0:
xlab = 'x-direction'
area = L_y*L_z
    elif axis == 1:
xlab = 'y-direction'
area = L_x*L_z
else:
axis = 2
xlab = 'z-direction'
area = L_x*L_y
n_max = _sp.amax(network['pore.coords'][:, axis]) + \
_sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
steps = _sp.linspace(0, n_max, 100, endpoint=True)
vals = _sp.zeros_like(steps)
p_area = _sp.zeros_like(steps)
t_area = _sp.zeros_like(steps)
rp = ((21/88.0)*network['pore.volume'])**(1/3.0)
p_upper = network['pore.coords'][:, axis] + rp
p_lower = network['pore.coords'][:, axis] - rp
TC1 = network['throat.conns'][:, 0]
TC2 = network['throat.conns'][:, 1]
t_upper = network['pore.coords'][:, axis][TC1]
t_lower = network['pore.coords'][:, axis][TC2]
for i in range(0, len(steps)):
p_temp = (p_upper > steps[i])*(p_lower < steps[i])
t_temp = (t_upper > steps[i])*(t_lower < steps[i])
p_area[i] = sum((22/7.0)*(rp[p_temp]**2 -
(network['pore.coords'][:, axis][p_temp]-steps[i])**2))
t_area[i] = sum(network['throat.area'][t_temp])
vals[i] = (p_area[i]+t_area[i])/area
yaxis = vals
xaxis = steps/n_max
_plt.plot(xaxis, yaxis, 'bo-')
_plt.xlabel(xlab)
_plt.ylabel('Porosity')
return fig
def saturation_profile(network, phase, fig=None, axis=2):
r"""
Compute and plot the saturation profile in all three dimensions
Parameters
----------
network : OpenPNM Network object
    phase : OpenPNM Phase object
        The invading or defending phase whose saturation distribution is plotted
    axis : integer; 0 for x-axis, 1 for y-axis, 2 for z-axis
"""
if fig is None:
fig = _plt.figure()
if phase is None:
raise Exception('The phase for saturation profile plot is not given')
    if axis == 0:
xlab = 'x-direction'
    elif axis == 1:
xlab = 'y-direction'
else:
axis = 2
xlab = 'z-direction'
n_max = _sp.amax(network['pore.coords'][:, axis]) + \
_sp.mean(((21/88.0)*network['pore.volume'])**(1/3.0))
steps = _sp.linspace(0, n_max, 100, endpoint=True)
p_area = _sp.zeros_like(steps)
op_area = _sp.zeros_like(steps)
t_area = _sp.zeros_like(steps)
ot_area = _sp.zeros_like(steps)
vals = _sp.zeros_like(steps)
PO = phase['pore.occupancy']
TO = phase['throat.occupancy']
rp = ((21/88.0)*network['pore.volume'])**(1/3.0)
p_upper = network['pore.coords'][:, axis] + rp
p_lower = network['pore.coords'][:, axis] - rp
TC1 = network['throat.conns'][:, 0]
TC2 = network['throat.conns'][:, 1]
t_upper = network['pore.coords'][:, axis][TC1]
t_lower = network['pore.coords'][:, axis][TC2]
for i in range(0, len(steps)):
op_temp = (p_upper > steps[i])*(p_lower < steps[i])*PO
ot_temp = (t_upper > steps[i])*(t_lower < steps[i])*TO
op_temp = _sp.array(op_temp, dtype='bool')
        ot_temp = _sp.array(ot_temp, dtype='bool')
p_temp = (p_upper > steps[i])*(p_lower < steps[i])
t_temp = (t_upper > steps[i])*(t_lower < steps[i])
op_area[i] = sum((22/7.0)*(rp[op_temp]**2 -
(network['pore.coords'][:, axis][op_temp]-steps[i])**2))
ot_area[i] = sum(network['throat.area'][ot_temp])
p_area[i] = sum((22/7.0)*(rp[p_temp]**2 -
(network['pore.coords'][:, axis][p_temp]-steps[i])**2))
t_area[i] = sum(network['throat.area'][t_temp])
vals[i] = (op_area[i]+ot_area[i])/(p_area[i]+t_area[i])
if vals[i] > 1:
vals[i] = 1.
if _sp.isnan(vals[i]):
vals[i] = 1.
if vals[-1] == 1.:
vals = vals[::-1]
yaxis = vals
xaxis = steps/n_max
_plt.plot(xaxis, yaxis, 'bo-')
_plt.xlabel(xlab)
_plt.ylabel('Saturation')
return fig
def distributions(obj,
throat_diameter='throat.diameter',
pore_diameter='pore.diameter',
throat_length='throat.length'):
r"""
Plot a montage of key network size distribution histograms
Parameters
----------
obj : OpenPNM Object
This object can either be a Network or a Geometry. If a Network is
sent, then the histograms will display the properties for the entire
Network. If a Geometry is sent, then only it's properties will be
shown.
throat_diameter : string
Dictionary key to the array containing throat diameter values
pore_diameter : string
Dictionary key to the array containing pore diameter values
throat_length : string
Dictionary key to the array containing throat length values
"""
fig = _plt.figure()
fig.subplots_adjust(hspace=0.4)
fig.subplots_adjust(wspace=0.4)
pores = obj._net.pores(obj.name)
throats = obj._net.throats(obj.name)
net = obj._net
ax1 = fig.add_subplot(221)
ax1.hist(net[pore_diameter][pores], 25, facecolor='green')
ax1.set_xlabel('Pore Diameter')
ax1.set_ylabel('Frequency')
ax1.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
ax2 = fig.add_subplot(222)
x = net.num_neighbors(pores, flatten=False)
ax2.hist(x, 25, facecolor='yellow')
ax2.set_xlabel('Coordination Number')
ax2.set_ylabel('Frequency')
ax3 = fig.add_subplot(223)
ax3.hist(net[throat_diameter][throats], 25, facecolor='blue')
ax3.set_xlabel('Throat Diameter')
ax3.set_ylabel('Frequency')
ax3.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
ax4 = fig.add_subplot(224)
ax4.hist(net[throat_length][throats], 25, facecolor='red')
ax4.set_xlabel('Throat Length')
ax4.set_ylabel('Frequency')
ax4.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
return fig
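# Illustrative usage sketch (assumes the OpenPNM 1.x API; the names below are
# examples, not part of this module):
#
#   import OpenPNM
#   pn = OpenPNM.Network.Cubic(shape=[10, 10, 10], spacing=0.0001)
#   geom = OpenPNM.Geometry.Stick_and_Ball(network=pn, pores=pn.Ps,
#                                          throats=pn.Ts)
#   fig = distributions(geom)  # montage of pore/throat size histograms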
def pore_size_distribution(network, fig=None):
r"""
Plot the pore and throat size distribution which is the accumulated
volume vs. the diameter in a semilog plot
Parameters
----------
network : OpenPNM Network object
"""
if fig is None:
fig = _plt.figure()
dp = network['pore.diameter']
Vp = network['pore.volume']
dt = network['throat.diameter']
Vt = network['throat.volume']
dmax = max(max(dp), max(dt))
steps = _sp.linspace(0, dmax, 100, endpoint=True)
vals = _sp.zeros_like(steps)
for i in range(0, len(steps)-1):
temp1 = dp > steps[i]
temp2 = dt > steps[i]
vals[i] = sum(Vp[temp1]) + sum(Vt[temp2])
yaxis = vals
xaxis = steps
_plt.semilogx(xaxis, yaxis, 'b.-')
_plt.xlabel('Pore & Throat Diameter (m)')
_plt.ylabel('Cumulative Volume (m^3)')
return fig
def drainage_curves(inv_alg, fig=None, Pc='inv_Pc', sat='inv_sat',
seq='inv_seq', timing=None):
r"""
Plot a montage of key saturation plots
Parameters
----------
inv_alg : OpenPNM Algorithm Object
The invasion algorithm for which the graphs are desired
timing : string
if algorithm keeps track of simulated time, insert string here
"""
inv_throats = inv_alg.toindices(inv_alg['throat.' + seq] > 0)
sort_seq = _sp.argsort(inv_alg['throat.'+seq][inv_throats])
inv_throats = inv_throats[sort_seq]
if fig is None:
fig = _plt.figure(num=1, figsize=(13, 10), dpi=80,
facecolor='w', edgecolor='k')
ax1 = fig.add_subplot(231) # left
ax2 = fig.add_subplot(232) # middle
ax3 = fig.add_subplot(233) # right
ax4 = fig.add_subplot(234) # left
ax5 = fig.add_subplot(235) # middle
ax6 = fig.add_subplot(236) # right
ax1.plot(inv_alg['throat.' + Pc][inv_throats],
inv_alg['throat.' + sat][inv_throats])
ax1.set_xlabel('Capillary Pressure (Pa)')
ax1.set_ylabel('Saturation')
ax1.set_ylim([0, 1])
ax1.set_xlim([0.99*min(inv_alg['throat.' + Pc][inv_throats]),
1.01*max(inv_alg['throat.' + Pc][inv_throats])])
ax2.plot(inv_alg['throat.' + seq][inv_throats],
inv_alg['throat.' + sat][inv_throats])
ax2.set_xlabel('Simulation Step')
ax2.set_ylabel('Saturation')
ax2.set_ylim([0, 1])
ax2.set_xlim([0, 1.01*max(inv_alg['throat.' + seq][inv_throats])])
if timing is None:
ax3.plot(0, 0)
ax3.set_xlabel('No Time Data Available')
else:
ax3.plot(inv_alg['throat.' + timing][inv_throats],
inv_alg['throat.' + sat][inv_throats])
ax3.set_xlabel('Time (s)')
ax3.set_ylabel('Saturation')
ax3.set_ylim([0, 1])
ax3.set_xlim([0, 1.01*max(inv_alg['throat.' + timing][inv_throats])])
ax4.plot(inv_alg['throat.' + sat][inv_throats],
inv_alg['throat.' + Pc][inv_throats])
ax4.set_ylabel('Capillary Pressure (Pa)')
ax4.set_xlabel('Saturation')
ax4.set_xlim([0, 1])
ax4.set_ylim([0.99*min(inv_alg['throat.' + Pc][inv_throats]),
1.01*max(inv_alg['throat.' + Pc][inv_throats])])
ax5.plot(inv_alg['throat.' + seq][inv_throats],
inv_alg['throat.' + Pc][inv_throats])
ax5.set_xlabel('Simulation Step')
ax5.set_ylabel('Capillary Pressure (Pa)')
ax5.set_ylim([0.99*min(inv_alg['throat.' + Pc][inv_throats]),
1.01*max(inv_alg['throat.' + Pc][inv_throats])])
ax5.set_xlim([0, 1.01*max(inv_alg['throat.' + seq][inv_throats])])
if timing is None:
ax6.plot(0, 0)
ax6.set_xlabel('No Time Data Available')
else:
ax6.plot(inv_alg['throat.' + timing][inv_throats],
inv_alg['throat.' + Pc][inv_throats])
ax6.set_xlabel('Time (s)')
ax6.set_ylabel('Capillary Pressure (Pa)')
ax6.set_ylim([0.99*min(inv_alg['throat.' + Pc][inv_throats]),
1.01*max(inv_alg['throat.' + Pc][inv_throats])])
ax6.set_xlim([0, 1.01*max(inv_alg['throat.' + timing][inv_throats])])
fig.subplots_adjust(left=0.08, right=0.99, top=0.95, bottom=0.1)
ax1.grid(True)
ax2.grid(True)
ax3.grid(True)
ax4.grid(True)
ax5.grid(True)
ax6.grid(True)
return fig
| mit |
moutai/scikit-learn | sklearn/ensemble/voting_classifier.py | 8 | 8679 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
from ..exceptions import NotFittedError
from ..utils.validation import check_is_fitted
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array-like, shape = [n_predictions]
The classes labels.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.estimators is None or len(self.estimators) == 0:
raise AttributeError('Invalid `estimators` attribute, `estimators`'
' should be a list of (string, estimator)'
' tuples')
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions.astype('int'))
maj = self.le_.inverse_transform(maj)
return maj
    def _collect_probas(self, X):
        """Collect results from clf.predict_proba calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
check_is_fitted(self, 'estimators_')
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
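    # Illustrative sketch: because get_params() exposes the named estimators,
    # their hyper-parameters can be tuned with GridSearchCV (assuming the
    # `eclf` soft-voting example from the class docstring; GridSearchCV lives
    # in sklearn.model_selection in recent releases, sklearn.grid_search in
    # older ones):
    #
    #   from sklearn.model_selection import GridSearchCV
    #   params = {'lr__C': [1.0, 100.0], 'rf__n_estimators': [20, 200]}
    #   grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
    #   grid.fit(X, y)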
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
| bsd-3-clause |
IntelLabs/hpat | examples/series_loc/series_loc_multiple_result.py | 1 | 1789 | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
Expected Series:
0 5
0 3
0 1
dtype: int64
"""
import numpy as np
import pandas as pd
from numba import njit
@njit
def series_loc_many_idx():
series = pd.Series([5, 4, 3, 2, 1], index=[0, 2, 0, 6, 0])
return series.loc[0]
print(series_loc_many_idx())
| bsd-2-clause |
paix120/DataScienceLearningClubActivities | Activity01/Activity1b.py | 1 | 4029 | # import pandas and numpy libraries
import pandas as pd
# read in text file, which is tab delimited, and set global unique indentifier as the index column
df = pd.read_csv('ebird_rubythroated_Jan2013_Aug2015.txt', sep='\t', index_col='GLOBAL UNIQUE IDENTIFIER',
error_bad_lines=False, warn_bad_lines=True)
# error on row 36575
# pandas.parser.CParserError: Error tokenizing data. C error: Expected 44 fields in line 36575, saw 45
# set error_bad_lines to false so it would just skip that line and keep going
# force it to display wider in the console instead of wrapping so narrowly
pd.set_option('display.width', 250)
# set pandas to output all of the columns in output (was truncating)
pd.options.display.max_columns = 50
# display the list of data columns
print(df.columns)
# display the first 10 rows to view example data (this time all columns)
print(df.head(n=10))
# display summary stats (at least for numeric columns)
print('\nSummary statistics:')
print(df.describe())
# check to make sure unique identifier is unique
print('\nIndex unique? ' + str(df.index.is_unique))
# data types of columns
print('\nColumn Data Types:')
print(df.dtypes)
# how many empty/null values in each column?
print('\nCount of null values by column (columns with no nulls not displayed): ')
# display the columns with blank values (skip others)
print(df.isnull().sum()[df.isnull().sum() > 0])
# find/print that one row with a null locality value
print('\n1 Row with null Locality: ')
print(df.loc[df['LOCALITY'].isnull()].transpose())
# pandas dataframe row-col indexing - print the 1000th row, 12th column
print('\n1000th Row, 12th column (State/Province): ' + df.ix[1000][11])
# get the count for each bird species' sightings (by common name)
cn = pd.Series(df['COMMON NAME']).value_counts()
print('\nNumber of Ruby-Throated Hummingbird sightings (rows) in dataset:')
print(cn)
# data dictionary says X indicates uncounted presence, so let's call them 1s for now
# (note: may be bad for analysis, just exploring now)
df_counting = pd.DataFrame(df['OBSERVATION COUNT'].replace(to_replace='X', value='1'))
# rename the column so it will have a different name than original column after merge
df_counting.rename(columns={'OBSERVATION COUNT': 'OBS COUNT MODIFIED'}, inplace=True)
df = pd.concat([df, df_counting], axis=1)
# print(df)
# view the earliest and latest observation date in the dataset
print('\nDate range of observations: ' + str(df['OBSERVATION DATE'].min()) + ' - ' + str(df['OBSERVATION DATE'].max()))
# get min latitude per species and combine the dataframes (made more sense in sample dataset with multiple species)
# and rename the columns before recombining so we can tell what summarization is in each
df_min_lats = pd.DataFrame(df.pivot_table('LATITUDE', index='COMMON NAME', aggfunc='min'))
df_min_lats.rename(columns={'LATITUDE': 'MIN LATITUDE'}, inplace=True)
df_max_lats = pd.DataFrame(df.pivot_table('LATITUDE', index='COMMON NAME', aggfunc='max'))
df_max_lats.rename(columns={'LATITUDE': 'MAX LATITUDE'}, inplace=True)
# print(df_min_lats,df_max_lats)
df_min_max = pd.concat([df_min_lats, df_max_lats], axis=1) # removed df_grouped
print('\nSummary: latitude ranges and counts:')
print(df_min_max)
# show min max latitude for Ruby-Throated Hummingbird by month
# calculate month from observation date (forget year)
df['OBS MONTH'] = df['OBSERVATION DATE'].str[5:7]
# and rename the columns before recombining so we can tell what summarization is in each
df_min_lats = pd.DataFrame(df.pivot_table('LATITUDE', index='OBS MONTH', aggfunc='min'))
df_min_lats.rename(columns={'LATITUDE': 'MIN LATITUDE'}, inplace=True)
df_max_lats = pd.DataFrame(df.pivot_table('LATITUDE', index='OBS MONTH', aggfunc='max'))
df_max_lats.rename(columns={'LATITUDE': 'MAX LATITUDE'}, inplace=True)
# print(df_min_lats,df_max_lats)
df_min_max = pd.concat([df_min_lats, df_max_lats], axis=1) # removed df_grouped
print('\nSummary: latitude ranges by Observation Month (in any year 2013-2015):')
print(df_min_max)
| gpl-2.0 |
xzh86/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
| bsd-3-clause |
QRAAT/QRAAT | server/scripts/pos/stat_pos.py | 1 | 3751 | #!/usr/bin/env python2
# stat_pos.py - Calculate velocity and acceleration of raw positions.
#
# Copyright (C) 2013 Christopher Patton
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import qraat, qraat.srv
import time, os, sys, commands, re
import MySQLdb as mdb
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pp
from optparse import OptionParser
# Check for running instances of this program.
(status, output) = commands.getstatusoutput(
'pgrep -c `basename %s`' % (sys.argv[0]))
if int(output) > 1:
print >>sys.stderr, "stat_pos: error: attempted reentry, exiting"
sys.exit(1)
parser = OptionParser()
parser.description = ''' '''
parser.add_option('--t-start', type='str', metavar='YYYY-MM-DD', default='2013-08-13')
parser.add_option('--t-end', type='str', metavar='YYYY-MM-DD', default='2013-08-14')
parser.add_option('--tx-id', type='int', metavar='ID', default=51)
(options, args) = parser.parse_args()
try:
start = time.time()
print "stat_pos: start time:", time.asctime(time.localtime(start))
db_con = qraat.srv.util.get_db('reader')
try:
t_start = time.mktime(time.strptime(options.t_start, "%Y-%m-%d"))
t_end = time.mktime(time.strptime(options.t_end, "%Y-%m-%d"))
except ValueError:
    print >>sys.stderr, "stat_pos: error: malformed date string."
sys.exit(1)
V = qraat.srv.track.transition_distribution(db_con, t_start, t_end, options.tx_id)
if len(V) > 0:
(mean, stddev) = (np.mean(V), np.std(V))
m = 10
n, bins, patches = pp.hist(V, 75, range=[0,m], normed=1, histtype='stepfilled')
pp.setp(patches, 'facecolor', 'b', 'alpha', 0.20)
pp.title('%s to %s txID=%d' % (options.t_start, options.t_end, options.tx_id))
pp.xlabel("M / sec")
pp.ylabel("Probability density")
pp.text(0.7 * m, 0.8 * max(n),
('Target speed\n'
' $\mu=%d$\n'
' $\sigma=%d$\n'
' range (%d, %d)') % (mean, stddev, min(V), max(V)))
pp.savefig('tx%d_%s_velocity.png' % (options.tx_id, options.t_start))
print len(positions.track)
pp.clf()
pp.plot(
map(lambda (P, t): P.imag, positions.track),
map(lambda (P, t): P.real, positions.track), '.', alpha=0.3)
pp.savefig('plot.png')
else:
print >>sys.stderr, "stat_pos: no data."
# pp.clf()
# (mean, stddev) = (0, 0)
# m = 20
# n, bins, patches = pp.hist(A, 75, range=[0,m], normed=1, histtype='stepfilled')
# pp.setp(patches, 'facecolor', 'b', 'alpha', 0.20)
# pp.title('%s to %s txID=%d' % (options.t_start, options.t_end, options.tx_id))
# pp.xlabel("M / sec$^2$")
# pp.ylabel("Probability density")
# pp.text(0.7 * m, 0.8 * max(n),
# ('Target acceleration\n'
# ' $\mu=%d$\n'
# ' $\sigma=%d$\n'
# ' range (%d, %d)') % (mean, stddev, min(A), max(A)))
# pp.savefig('tx%d_%s_accel.png' % (options.tx_id, options.t_start))
except mdb.Error, e:
print >>sys.stderr, "stat_pos: error: [%d] %s" % (e.args[0], e.args[1])
sys.exit(1)
except qraat.error.QraatError, e:
print >>sys.stderr, "stat_pos: error: %s." % e
finally:
print "stat_pos: finished in %.2f seconds." % (time.time() - start)
| gpl-3.0 |
metaml/NAB | nab/runner.py | 1 | 10021 | # ----------------------------------------------------------------------
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import multiprocessing
import os
import pandas
try:
import simplejson as json
except ImportError:
import json
from nab.corpus import Corpus
from nab.detectors.base import detectDataSet
from nab.labeler import CorpusLabel
from nab.optimizer import optimizeThreshold
from nab.scorer import scoreCorpus
from nab.util import updateThresholds, updateFinalResults
class Runner(object):
"""
Class to run an endpoint (detect, optimize, or score) on the NAB
benchmark using the specified set of profiles, thresholds, and/or detectors.
"""
def __init__(self,
dataDir,
resultsDir,
labelPath,
profilesPath,
thresholdPath,
numCPUs=None):
"""
@param dataDir (string) Directory where all the raw datasets exist.
@param resultsDir (string) Directory where the detector anomaly scores
will be scored.
@param labelPath (string) Path where the labels of the datasets
exist.
@param profilesPath (string) Path to JSON file containing application
profiles and associated cost matrices.
@param thresholdPath (string) Path to thresholds dictionary containing the
best thresholds (and their corresponding
score) for a combination of detector and
user profile.
@probationaryPercent (float) Percent of each dataset which will be
ignored during the scoring process.
@param numCPUs (int) Number of CPUs to be used for calls to
multiprocessing.pool.map
"""
self.dataDir = dataDir
self.resultsDir = resultsDir
self.labelPath = labelPath
self.profilesPath = profilesPath
self.thresholdPath = thresholdPath
self.pool = multiprocessing.Pool(numCPUs)
self.probationaryPercent = 0.15
self.windowSize = 0.10
self.corpus = None
self.corpusLabel = None
self.profiles = None
def initialize(self):
"""Initialize all the relevant objects for the run."""
self.corpus = Corpus(self.dataDir)
self.corpusLabel = CorpusLabel(path=self.labelPath, corpus=self.corpus)
with open(self.profilesPath) as p:
self.profiles = json.load(p)
def detect(self, detectors):
"""Generate results file given a dictionary of detector classes
Function that takes a set of detectors and a corpus of data and creates a
set of files storing the alerts and anomaly scores given by the detectors
@param detectors (dict) Dictionary with key value pairs of a
detector name and its corresponding
class constructor.
"""
print "\nRunning detection step"
count = 0
args = []
for detectorName, detectorConstructor in detectors.iteritems():
for relativePath, dataSet in self.corpus.dataFiles.iteritems():
if self.corpusLabel.labels.has_key(relativePath):
args.append(
(
count,
detectorConstructor(
dataSet=dataSet,
probationaryPercent=self.probationaryPercent),
detectorName,
self.corpusLabel.labels[relativePath]["label"],
self.resultsDir,
relativePath
)
)
count += 1
self.pool.map(detectDataSet, args)
def optimize(self, detectorNames):
"""Optimize the threshold for each combination of detector and profile.
@param detectorNames (list) List of detector names.
@return thresholds (dict) Dictionary of dictionaries with detector names
then profile names as keys followed by another
dictionary containing the score and the
threshold used to obtained that score.
"""
print "\nRunning optimize step"
scoreFlag = False
thresholds = {}
for detectorName in detectorNames:
resultsDetectorDir = os.path.join(self.resultsDir, detectorName)
resultsCorpus = Corpus(resultsDetectorDir)
thresholds[detectorName] = {}
for profileName, profile in self.profiles.iteritems():
thresholds[detectorName][profileName] = optimizeThreshold(
(self.pool,
detectorName,
profileName,
profile["CostMatrix"],
resultsDetectorDir,
resultsCorpus,
self.corpusLabel,
self.probationaryPercent,
scoreFlag))
updateThresholds(thresholds, self.thresholdPath)
return thresholds
def score(self, detectorNames, thresholds):
"""Score the performance of the detectors.
Function that must be called only after detection result files have been
generated and thresholds have been optimized. This looks at the result files
and scores the performance of each detector specified and stores these
results in a csv file.
@param detectorNames (list) List of detector names.
@param thresholds (dict) Dictionary of dictionaries with detector
names then profile names as keys followed by
another dictionary containing the score and
the threshold used to obtained that score.
"""
print "\nRunning scoring step"
scoreFlag = True
baselines = {}
self.resultsFiles = []
for detectorName in detectorNames:
resultsDetectorDir = os.path.join(self.resultsDir, detectorName)
resultsCorpus = Corpus(resultsDetectorDir)
for profileName, profile in self.profiles.iteritems():
threshold = thresholds[detectorName][profileName]["threshold"]
resultsDF = scoreCorpus(threshold,
(self.pool,
detectorName,
profileName,
profile["CostMatrix"],
resultsDetectorDir,
resultsCorpus,
self.corpusLabel,
self.probationaryPercent,
scoreFlag))
scorePath = os.path.join(resultsDetectorDir, "%s_%s_scores.csv" %\
(detectorName, profileName))
resultsDF.to_csv(scorePath, index=False)
print "%s detector benchmark scores written to %s" %\
(detectorName, scorePath)
self.resultsFiles.append(scorePath)
def normalize(self):
"""Normalize the detectors' scores according to the Baseline, and print to
the console.
Function can only be called with the scoring step (i.e. runner.score())
preceding it.
This reads the total score values from the results CSVs, and
adds the relevant baseline value. The scores are then normalized by
multiplying by 100/perfect, where the perfect score is the number of TPs
possible (i.e. 44.0).
Note the results CSVs still contain the original scores, not normalized.
"""
print "\nRunning score normalization step"
# Get baselines for each application profile.
baselineDir = os.path.join(self.resultsDir, "baseline")
if not os.path.isdir(baselineDir):
raise IOError("No results directory for baseline. You must "
"run the baseline detector before normalizing scores.")
baselines = {}
for profileName, _ in self.profiles.iteritems():
fileName = os.path.join(baselineDir,
"baseline_" + profileName + "_scores.csv")
with open(fileName) as f:
results = pandas.read_csv(f)
baselines[profileName] = results["Score"].iloc[-1]
# Normalize the score from each results file.
finalResults = {}
for resultsFile in self.resultsFiles:
profileName = [k for k in baselines.keys() if k in resultsFile][0]
base = baselines[profileName]
with open(resultsFile) as f:
results = pandas.read_csv(f)
# Calculate score:
perfect = 44.0 - base
score = (-base + results["Score"].iloc[-1]) * (100/perfect)
# Add to results dict:
resultsInfo = resultsFile.split('/')[-1].split('.')[0]
detector = resultsInfo.split('_')[0]
profile = resultsInfo.replace(detector + "_", "").replace("_scores", "")
if detector not in finalResults:
finalResults[detector] = {}
finalResults[detector][profile] = score
print ("Final score for \'%s\' detector on \'%s\' profile = %.2f"
% (detector, profile, score))
resultsPath = os.path.join(self.resultsDir, "final_results.json")
updateFinalResults(finalResults, resultsPath)
print "Final scores have been written to %s." % resultsPath
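# Illustrative usage sketch (the real entry point is NAB's run.py, which wires
# these steps together; the paths below are examples):
#
#   runner = Runner(dataDir="data",
#                   resultsDir="results",
#                   labelPath="labels/combined_windows.json",
#                   profilesPath="config/profiles.json",
#                   thresholdPath="config/thresholds.json")
#   runner.initialize()
#   runner.detect(detectors)                       # {name: detector class}
#   thresholds = runner.optimize(detectors.keys())
#   runner.score(detectors.keys(), thresholds)
#   runner.normalize()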
| agpl-3.0 |
jameshensman/cgt | thirdparty/tabulate.py | 24 | 29021 | # -*- coding: utf-8 -*-
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from platform import python_version_tuple
import re
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_int_type = int
_float_type = float
_text_type = unicode
_binary_type = str
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_int_type = int
_float_type = float
_text_type = str
_binary_type = bytes
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.2"
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _latex_line_begin_tabular(colwidths, colaligns):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
    tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
    return "\\begin{tabular}{" + tabular_columns_fmt + "}\n\\hline"
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=DataRow("", "&", "\\\\"),
datarow=DataRow("", "&", "\\\\"),
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_invisible_codes = re.compile(r"\x1b\[\d*m")  # ANSI color codes
_invisible_codes_bytes = re.compile(br"\x1b\[\d*m")  # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except ValueError:
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is int or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) and \
_isconvertible(int, string)
def _type(string, has_invisible=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isint(string):
return int
elif _isnumber(string):
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:>%ds}" % iwidth
return fmt.format(s)
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:<%ds}" % iwidth
return fmt.format(s)
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
iwidth = width + len(s) - len(_strip_invisible(s)) if has_invisible else width
fmt = "{0:^%ds}" % iwidth
return fmt.format(s)
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return len(_strip_invisible(s))
else:
return len(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
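        # pad each value on the right so that every string ends the same
        # number of characters after its decimal point; flushing right
        # afterwards then lines the decimal points up in one column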
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
return strings
else:
strings = [s.strip() for s in strings]
padfn = _padright
if has_invisible:
width_fn = _visible_width
else:
width_fn = len
maxwidth = max(max(map(width_fn, strings)), minwidth)
padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4 }
invtypes = { 4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type }
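    # the "more generic" of two types is the one further along the chain
    # NoneType -> int -> float -> bytes -> text; unknown types are treated
    # as text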
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible) for s in strings ]
return reduce(_more_generic, types, int)
def _format(val, valtype, floatfmt, missingval=""):
    """Format a value according to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
return _text_type(val, "ascii")
elif valtype is float:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width):
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
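    For example (a sketch of the expected behaviour, not a doctest): a dict
    {"a": [1, 2], "b": [3, 4]} with headers="keys" yields the rows
    [[1, 3], [2, 4]] and the headers ["a", "b"], with header and column
    order following the dict's iteration order.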
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = tabular_data.keys()
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v]+list(row) for v,row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
    else: # it's a usual iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")): # namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif headers == "keys" and len(rows) > 0: # keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(headers)
rows = list(map(list,rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
def tabulate(tabular_data, headers=[], tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, a two-dimensional NumPy array,
NumPy record array, or a Pandas' dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
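    For example, `floatfmt=".2f"` renders a float cell such as 3.14159
    as 3.14.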
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
and 'latex'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"""
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
if has_invisible:
width_fn = _visible_width
else:
width_fn = len
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval) for v in c]
for c,ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
minwidths = [width_fn(h)+2 for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, cols)]
headers = [_align_header(h, a, minw)
for h, a, minw in zip(headers, aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
padded_headers = _pad_row(headers, pad)
padded_rows = [_pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))
if padded_headers:
lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
else:
for row in padded_rows:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))
return "\n".join(lines)
| mit |
mohammadKhalifa/word_cloud | examples/colored_by_group.py | 1 | 4407 | #!/usr/bin/env python
"""
Colored by Group Example
========================
Generating a word cloud that assigns colors to words based on
a predefined mapping from colors to words
"""
from wordcloud import (WordCloud, get_single_color_func)
import matplotlib.pyplot as plt
class SimpleGroupedColorFunc(object):
"""Create a color function object which assigns EXACT colors
to certain words based on the color to words mapping
Parameters
----------
color_to_words : dict(str -> list(str))
A dictionary that maps a color to the list of words.
default_color : str
Color that will be assigned to a word that's not a member
of any value from color_to_words.
"""
def __init__(self, color_to_words, default_color):
self.word_to_color = {word: color
for (color, words) in color_to_words.items()
for word in words}
self.default_color = default_color
def __call__(self, word, **kwargs):
return self.word_to_color.get(word, self.default_color)
class GroupedColorFunc(object):
"""Create a color function object which assigns DIFFERENT SHADES of
specified colors to certain words based on the color to words mapping.
Uses wordcloud.get_single_color_func
Parameters
----------
color_to_words : dict(str -> list(str))
A dictionary that maps a color to the list of words.
default_color : str
Color that will be assigned to a word that's not a member
of any value from color_to_words.
"""
def __init__(self, color_to_words, default_color):
self.color_func_to_words = [(get_single_color_func(color), set(words))
for (color, words) in color_to_words.items()]
self.default_color_func = get_single_color_func(default_color)
def get_color_func(self, word):
"""Returns a single_color_func associated with the word"""
try:
color_func = next(color_func
for (color_func, words) in self.color_func_to_words
if word in words)
except StopIteration:
color_func = self.default_color_func
return color_func
def __call__(self, word, **kwargs):
return self.get_color_func(word)(word, **kwargs)
text = """The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!"""
# Since the text is small collocations are turned off and text is lower-cased
wc = WordCloud(collocations=False).generate(text.lower())
color_to_words = {
# words below will be colored with a green single color function
'#00ff00': ['beautiful', 'explicit', 'simple', 'sparse',
'readability', 'rules', 'practicality',
'explicitly', 'one', 'now', 'easy', 'obvious', 'better'],
# will be colored with a red single color function
'red': ['ugly', 'implicit', 'complex', 'complicated', 'nested',
'dense', 'special', 'errors', 'silently', 'ambiguity',
'guess', 'hard']
}
# Words that are not in any of the color_to_words values
# will be colored with a grey single color function
default_color = 'grey'
# Create a color function with single tone
# grouped_color_func = SimpleGroupedColorFunc(color_to_words, default_color)
# Create a color function with multiple tones
grouped_color_func = GroupedColorFunc(color_to_words, default_color)
# Apply our color function
wc.recolor(color_func=grouped_color_func)
# Plot
plt.figure()
plt.imshow(wc)
plt.axis("off")
plt.show()
| mit |
gnu-sandhi/sandhi | modules/gr36/gnuradio-core/src/examples/pfb/decimate.py | 17 | 5706 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 10000000 # number of samples to use
self._fs = 10000 # initial sampling rate
self._decim = 20 # Decimation rate
# Generate the prototype filter taps for the decimators with a 200 Hz bandwidth
self._taps = gr.firdes.low_pass_2(1, self._fs, 200, 150,
attenuation_dB=120, window=gr.firdes.WIN_BLACKMAN_hARRIS)
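        # (arguments: unity gain, the sampling rate self._fs, a 200 Hz cutoff
        # and -- presumably, per the usual low_pass_2 signature -- a 150 Hz
        # transition width; stop-band attenuation and window follow by keyword)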
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._decim))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._decim
print "Taps per channel: ", tpc
# Build the input signal source
# We create a list of freqs, and a sine wave is generated and added to the source
# for each one of these frequencies.
self.signals = list()
self.add = gr.add_cc()
freqs = [10, 20, 2040]
for i in xrange(len(freqs)):
self.signals.append(gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freqs[i], 1))
self.connect(self.signals[i], (self.add,i))
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct a PFB decimator filter
self.pfb = blks2.pfb_decimator_ccf(self._decim, self._taps, 0)
# Construct a standard FIR decimating filter
self.dec = gr.fir_filter_ccf(self._decim, self._taps)
self.snk_i = gr.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
        # Create the sink for the decimated signal
self.snk = gr.vector_sink_c()
self.connect(self.pfb, self.snk)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(16,9))
fig2 = pylab.figure(2, figsize=(16,9))
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input to the decimator
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b")
p1_t = sp1_t.plot(t_in, x_in.imag, "r")
sp1_t.set_ylim([-tb._decim*1.1, tb._decim*1.1])
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot the output of the decimator
fs_o = tb._fs / tb._decim
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk.data()[Ns:Ns+Ne]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("PFB Decimated Signal", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o.real, "b-o")
p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
Eric89GXL/mne-python | examples/decoding/plot_decoding_csp_eeg.py | 15 | 5012 | """
.. _ex-decoding-csp-eeg:
===========================================================================
Motor imagery decoding from EEG data using the Common Spatial Pattern (CSP)
===========================================================================
Decoding of motor imagery applied to EEG data decomposed using CSP. A
classifier is then applied to features extracted on CSP-filtered signals.
See https://en.wikipedia.org/wiki/Common_spatial_pattern and
:footcite:`Koles1991`. The EEGBCI dataset is documented in
:footcite:`SchalkEtAl2004` and is available at PhysioNet
:footcite:`GoldbergerEtAl2000`.
"""
# Authors: Martin Billinger <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import ShuffleSplit, cross_val_score
from mne import Epochs, pick_types, events_from_annotations
from mne.channels import make_standard_montage
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
from mne.decoding import CSP
print(__doc__)
###############################################################################
# Set parameters and read data
# avoid classification of evoked responses by using epochs that start 1s after
# cue onset.
tmin, tmax = -1., 4.
event_id = dict(hands=2, feet=3)
subject = 1
runs = [6, 10, 14] # motor imagery: hands vs feet
raw_fnames = eegbci.load_data(subject, runs)
raw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames])
eegbci.standardize(raw) # set channel names
montage = make_standard_montage('standard_1005')
raw.set_montage(montage)
# strip channel names of "." characters
raw.rename_channels(lambda x: x.strip('.'))
# Apply band-pass filter
raw.filter(7., 30., fir_design='firwin', skip_by_annotation='edge')
events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3))
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=None, preload=True)
epochs_train = epochs.copy().crop(tmin=1., tmax=2.)
labels = epochs.events[:, -1] - 2
###############################################################################
# Classification with linear discriminant analysis
# Define a monte-carlo cross-validation generator (reduce variance):
scores = []
epochs_data = epochs.get_data()
epochs_data_train = epochs_train.get_data()
cv = ShuffleSplit(10, test_size=0.2, random_state=42)
cv_split = cv.split(epochs_data_train)
# Assemble a classifier
lda = LinearDiscriminantAnalysis()
csp = CSP(n_components=4, reg=None, log=True, norm_trace=False)
# Use scikit-learn Pipeline with cross_val_score function
clf = Pipeline([('CSP', csp), ('LDA', lda)])
scores = cross_val_score(clf, epochs_data_train, labels, cv=cv, n_jobs=1)
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
# plot CSP patterns estimated on full data for visualization
csp.fit_transform(epochs_data, labels)
csp.plot_patterns(epochs.info, ch_type='eeg', units='Patterns (AU)', size=1.5)
###############################################################################
# Look at performance over time
sfreq = raw.info['sfreq']
w_length = int(sfreq * 0.5) # running classifier: window length
w_step = int(sfreq * 0.1) # running classifier: window step size
w_start = np.arange(0, epochs_data.shape[2] - w_length, w_step)
scores_windows = []
for train_idx, test_idx in cv_split:
y_train, y_test = labels[train_idx], labels[test_idx]
X_train = csp.fit_transform(epochs_data_train[train_idx], y_train)
X_test = csp.transform(epochs_data_train[test_idx])
# fit classifier
lda.fit(X_train, y_train)
# running classifier: test classifier on sliding window
score_this_window = []
for n in w_start:
X_test = csp.transform(epochs_data[test_idx][:, :, n:(n + w_length)])
score_this_window.append(lda.score(X_test, y_test))
scores_windows.append(score_this_window)
# Plot scores over time
w_times = (w_start + w_length / 2.) / sfreq + epochs.tmin
plt.figure()
plt.plot(w_times, np.mean(scores_windows, 0), label='Score')
plt.axvline(0, linestyle='--', color='k', label='Onset')
plt.axhline(0.5, linestyle='-', color='k', label='Chance')
plt.xlabel('time (s)')
plt.ylabel('classification accuracy')
plt.title('Classification score over time')
plt.legend(loc='lower right')
plt.show()
##############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/scipy/spatial/_spherical_voronoi.py | 16 | 13033 | """
Spherical Voronoi Code
.. versionadded:: 0.18.0
"""
#
# Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
# Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
#
# Distributed under the same BSD license as Scipy.
#
import numpy as np
import numpy.matlib
import scipy
import itertools
from . import _voronoi
from scipy.spatial.distance import pdist
__all__ = ['SphericalVoronoi']
def sphere_check(points, radius, center):
""" Determines distance of generators from theoretical sphere
surface.
"""
actual_squared_radii = (((points[...,0] - center[0]) ** 2) +
((points[...,1] - center[1]) ** 2) +
((points[...,2] - center[2]) ** 2))
max_discrepancy = (np.sqrt(actual_squared_radii) - radius).max()
return abs(max_discrepancy)
def calc_circumcenters(tetrahedrons):
""" Calculates the cirumcenters of the circumspheres of tetrahedrons.
An implementation based on
http://mathworld.wolfram.com/Circumsphere.html
Parameters
----------
tetrahedrons : an array of shape (N, 4, 3)
consisting of N tetrahedrons defined by 4 points in 3D
Returns
----------
circumcenters : an array of shape (N, 3)
consisting of the N circumcenters of the tetrahedrons in 3D
"""
num = tetrahedrons.shape[0]
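    # Per the circumsphere formulae (see the MathWorld reference above), the
    # circumcenter is (D_x, D_y, D_z) / (2 a), where a, D_x, D_y and D_z are
    # 4x4 determinants built from the vertex coordinates and their squared
    # norms; the alternating sign of D_y is folded into the negation below.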
a = np.concatenate((tetrahedrons, np.ones((num, 4, 1))), axis=2)
sums = np.sum(tetrahedrons ** 2, axis=2)
d = np.concatenate((sums[:, :, np.newaxis], a), axis=2)
dx = np.delete(d, 1, axis=2)
dy = np.delete(d, 2, axis=2)
dz = np.delete(d, 3, axis=2)
dx = np.linalg.det(dx)
dy = -np.linalg.det(dy)
dz = np.linalg.det(dz)
a = np.linalg.det(a)
nominator = np.vstack((dx, dy, dz))
denominator = 2*a
return (nominator / denominator).T
def project_to_sphere(points, center, radius):
"""
Projects the elements of points onto the sphere defined
by center and radius.
Parameters
----------
points : array of floats of shape (npoints, ndim)
consisting of the points in a space of dimension ndim
center : array of floats of shape (ndim,)
the center of the sphere to project on
radius : float
the radius of the sphere to project on
    Returns
    ----------
    array of floats of shape (npoints, ndim)
        the points projected onto the sphere
"""
lengths = scipy.spatial.distance.cdist(points, np.array([center]))
return (points - center) / lengths * radius + center
class SphericalVoronoi:
""" Voronoi diagrams on the surface of a sphere.
.. versionadded:: 0.18.0
Parameters
----------
points : ndarray of floats, shape (npoints, 3)
Coordinates of points to construct a spherical
Voronoi diagram from
radius : float, optional
Radius of the sphere (Default: 1)
center : ndarray of floats, shape (3,)
Center of sphere (Default: origin)
threshold : float
Threshold for detecting duplicate points and
mismatches between points and sphere parameters.
(Default: 1e-06)
Attributes
----------
points : double array of shape (npoints, 3)
the points in 3D to generate the Voronoi diagram from
radius : double
radius of the sphere
Default: None (forces estimation, which is less precise)
center : double array of shape (3,)
center of the sphere
Default: None (assumes sphere is centered at origin)
vertices : double array of shape (nvertices, 3)
Voronoi vertices corresponding to points
regions : list of list of integers of shape (npoints, _ )
the n-th entry is a list consisting of the indices
of the vertices belonging to the n-th point in points
Raises
------
ValueError
If there are duplicates in `points`.
If the provided `radius` is not consistent with `points`.
Notes
----------
The spherical Voronoi diagram algorithm proceeds as follows. The Convex
Hull of the input points (generators) is calculated, and is equivalent to
their Delaunay triangulation on the surface of the sphere [Caroli]_.
A 3D Delaunay tetrahedralization is obtained by including the origin of
the coordinate system as the fourth vertex of each simplex of the Convex
Hull. The circumcenters of all tetrahedra in the system are calculated and
projected to the surface of the sphere, producing the Voronoi vertices.
The Delaunay tetrahedralization neighbour information is then used to
order the Voronoi region vertices around each generator. The latter
approach is substantially less sensitive to floating point issues than
angle-based methods of Voronoi region vertex sorting.
The surface area of spherical polygons is calculated by decomposing them
into triangles and using L'Huilier's Theorem to calculate the spherical
excess of each triangle [Weisstein]_. The sum of the spherical excesses is
multiplied by the square of the sphere radius to obtain the surface area
of the spherical polygon. For nearly-degenerate spherical polygons an area
of approximately 0 is returned by default, rather than attempting the
unstable calculation.
Empirical assessment of spherical Voronoi algorithm performance suggests
quadratic time complexity (loglinear is optimal, but algorithms are more
challenging to implement). The reconstitution of the surface area of the
sphere, measured as the sum of the surface areas of all Voronoi regions,
is closest to 100 % for larger (>> 10) numbers of generators.
References
----------
.. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
points on or close to a sphere. Research Report RR-7004, 2009.
.. [Weisstein] "L'Huilier's Theorem." From MathWorld -- A Wolfram Web
Resource. http://mathworld.wolfram.com/LHuiliersTheorem.html
See Also
--------
Voronoi : Conventional Voronoi diagrams in N dimensions.
Examples
--------
>>> from matplotlib import colors
>>> from mpl_toolkits.mplot3d.art3d import Poly3DCollection
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import SphericalVoronoi
>>> from mpl_toolkits.mplot3d import proj3d
>>> # set input data
>>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
... [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])
>>> center = np.array([0, 0, 0])
>>> radius = 1
>>> # calculate spherical Voronoi diagram
>>> sv = SphericalVoronoi(points, radius, center)
>>> # sort vertices (optional, helpful for plotting)
>>> sv.sort_vertices_of_regions()
>>> # generate plot
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> # plot the unit sphere for reference (optional)
>>> u = np.linspace(0, 2 * np.pi, 100)
>>> v = np.linspace(0, np.pi, 100)
>>> x = np.outer(np.cos(u), np.sin(v))
>>> y = np.outer(np.sin(u), np.sin(v))
>>> z = np.outer(np.ones(np.size(u)), np.cos(v))
>>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
>>> # plot generator points
>>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
>>> # plot Voronoi vertices
>>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
... c='g')
>>> # indicate Voronoi regions (as Euclidean polygons)
>>> for region in sv.regions:
... random_color = colors.rgb2hex(np.random.rand(3))
... polygon = Poly3DCollection([sv.vertices[region]], alpha=1.0)
... polygon.set_color(random_color)
... ax.add_collection3d(polygon)
>>> plt.show()
"""
def __init__(self, points, radius=None, center=None, threshold=1e-06):
"""
Initializes the object and starts the computation of the Voronoi
diagram.
points : The generator points of the Voronoi diagram assumed to be
all on the sphere with radius supplied by the radius parameter and
center supplied by the center parameter.
radius : The radius of the sphere. Will default to 1 if not supplied.
center : The center of the sphere. Will default to the origin if not
supplied.
"""
self.points = points
if np.any(center):
self.center = center
else:
self.center = np.zeros(3)
if radius:
self.radius = radius
else:
self.radius = 1
if pdist(self.points).min() <= threshold * self.radius:
raise ValueError("Duplicate generators present.")
max_discrepancy = sphere_check(self.points,
self.radius,
self.center)
if max_discrepancy >= threshold * self.radius:
raise ValueError("Radius inconsistent with generators.")
self.vertices = None
self.regions = None
self._tri = None
self._calc_vertices_regions()
def _calc_vertices_regions(self):
"""
Calculates the Voronoi vertices and regions of the generators stored
in self.points. The vertices will be stored in self.vertices and the
regions in self.regions.
This algorithm was discussed at PyData London 2015 by
Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
"""
# perform 3D Delaunay triangulation on data set
# (here ConvexHull can also be used, and is faster)
self._tri = scipy.spatial.ConvexHull(self.points)
# add the center to each of the simplices in tri to get the same
# tetrahedrons we'd have gotten from Delaunay tetrahedralization
# tetrahedrons will have shape: (2N-4, 4, 3)
tetrahedrons = self._tri.points[self._tri.simplices]
tetrahedrons = np.insert(
tetrahedrons,
3,
np.array([self.center]),
axis=1
)
# produce circumcenters of tetrahedrons from 3D Delaunay
# circumcenters will have shape: (2N-4, 3)
circumcenters = calc_circumcenters(tetrahedrons)
# project tetrahedron circumcenters to the surface of the sphere
# self.vertices will have shape: (2N-4, 3)
self.vertices = project_to_sphere(
circumcenters,
self.center,
self.radius
)
# calculate regions from triangulation
# simplex_indices will have shape: (2N-4,)
simplex_indices = np.arange(self._tri.simplices.shape[0])
# tri_indices will have shape: (6N-12,)
tri_indices = np.column_stack([simplex_indices, simplex_indices,
simplex_indices]).ravel()
# point_indices will have shape: (6N-12,)
point_indices = self._tri.simplices.ravel()
# array_associations will have shape: (6N-12, 2)
array_associations = np.dstack((point_indices, tri_indices))[0]
array_associations = array_associations[np.lexsort((
array_associations[...,1],
array_associations[...,0]))]
array_associations = array_associations.astype(np.intp)
# group by generator indices to produce
# unsorted regions in nested list
groups = []
for k, g in itertools.groupby(array_associations,
lambda t: t[0]):
groups.append(list(list(zip(*list(g)))[1]))
self.regions = groups
def sort_vertices_of_regions(self):
"""
For each region in regions, it sorts the indices of the Voronoi
vertices such that the resulting points are in a clockwise or
counterclockwise order around the generator point.
This is done as follows: Recall that the n-th region in regions
surrounds the n-th generator in points and that the k-th
Voronoi vertex in vertices is the projected circumcenter of the
tetrahedron obtained by the k-th triangle in _tri.simplices (and the
origin). For each region n, we choose the first triangle (=Voronoi
vertex) in _tri.simplices and a vertex of that triangle not equal to
the center n. These determine a unique neighbor of that triangle,
which is then chosen as the second triangle. The second triangle
will have a unique vertex not equal to the current vertex or the
center. This determines a unique neighbor of the second triangle,
which is then chosen as the third triangle and so forth. We proceed
through all the triangles (=Voronoi vertices) belonging to the
generator in points and obtain a sorted version of the vertices
of its surrounding region.
"""
_voronoi.sort_vertices_of_regions(self._tri.simplices,
self.regions)
| gpl-3.0 |
jcatw/scnn | scnn/baseline_edge_experiment.py | 1 | 2403 | __author__ = 'jatwood'
import sys
import numpy as np
from sklearn.metrics import f1_score, accuracy_score
from sklearn.linear_model import LogisticRegression
import data
import kernel
def baseline_edge_experiment(model_fn, data_fn, data_name, model_name):
print 'Running edge experiment (%s)...' % (data_name,)
A, B, _, X, Y = data_fn()
selection_indices = np.arange(B.shape[0])
np.random.shuffle(selection_indices)
selection_indices = selection_indices[:10000]
print selection_indices
B = B[selection_indices,:]
X = X[selection_indices,:]
Y = Y[selection_indices,:]
n_edges = B.shape[0]
indices = np.arange(n_edges)
np.random.shuffle(indices)
print indices
train_indices = indices[:n_edges // 3]
valid_indices = indices[n_edges // 3:(2* n_edges) // 3]
test_indices = indices[(2* n_edges) // 3:]
best_C = None
best_acc = float('-inf')
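    # sweep the inverse regularization strength C from 10**4 down to 10**-3
    # and keep the value with the best validation accuracy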
for C in [10**(-x) for x in range(-4,4)]:
m = model_fn(C)
m.fit(X[train_indices,:], np.argmax(Y[train_indices,:],1))
preds = m.predict(X[valid_indices,:])
actuals = np.argmax(Y[valid_indices,:],1)
accuracy = accuracy_score(actuals, preds)
if accuracy > best_acc:
best_C = C
best_acc = accuracy
m = model_fn(best_C)
m.fit(X[train_indices,:], np.argmax(Y[train_indices],1))
preds = m.predict(X[test_indices,:])
actuals = np.argmax(Y[test_indices,:],1)
accuracy = accuracy_score(actuals, preds)
f1_micro = f1_score(actuals, preds, average='micro')
f1_macro = f1_score(actuals, preds, average='macro')
print 'form: name,micro_f,macro_f,accuracy'
print '###RESULTS###: %s,%s,%.8f,%.8f,%.8f' % (data_name, model_name, f1_micro, f1_macro, accuracy)
if __name__ == '__main__':
np.random.seed()
args = sys.argv[1:]
name_to_data = {
'wikirfa': lambda: data.parse_wikirfa(n_features=250)
}
baseline_models = {
'logisticl1': lambda C: LogisticRegression(penalty='l1', C=C),
'logisticl2': lambda C: LogisticRegression(penalty='l2', C=C),
}
data_name = args[0]
data_fn = name_to_data[data_name]
model_name = args[1]
if model_name in baseline_models:
baseline_edge_experiment(baseline_models[model_name], data_fn, data_name, model_name)
else:
print '%s not recognized' % (model_name,)
| mit |
chrsrds/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 10 | 4351 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).randint
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes, size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k, size=n_samples)
labels_b = random_labels(low=0, high=k, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def ami_score(U, V):
return metrics.adjusted_mutual_info_score(U, V)
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
ami_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(bottom=-0.05, top=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(bottom=-0.05, top=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
procoder317/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
B3AU/waveTree | sklearn/metrics/tests/test_score_objects.py | 4 | 4569 | import pickle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics import make_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.cluster import KMeans
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_blobs, load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
def test_make_scorer():
"""Sanity check on the make_scorer factory function."""
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
"""Test classification scorers."""
X, y = make_blobs(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
score1 = SCORERS['f1'](clf, X_test, y_test)
score2 = f1_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
"""Test regression scorers."""
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = SCORERS['r2'](clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
"""Test scorers that take thresholds."""
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = SCORERS['log_loss'](clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, SCORERS['roc_auc'], clf, X_test, y_test)
def test_unsupervised_scorers():
"""Test clustering scorers against gold standard labeling."""
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = SCORERS['adjusted_rand_score'](km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
"""Test that when a list of scores is returned, we raise proper errors."""
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
| bsd-3-clause |
caseyclements/bokeh | bokeh/tests/test_sources.py | 26 | 3245 | from __future__ import absolute_import
import unittest
from unittest import skipIf
import warnings
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
from bokeh.models.sources import DataSource, ColumnDataSource, ServerDataSource
class TestColumnDataSources(unittest.TestCase):
def test_basic(self):
ds = ColumnDataSource()
self.assertTrue(isinstance(ds, DataSource))
def test_init_dict_arg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
def test_init_dict_data_kwarg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data=data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_arg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_data_kwarg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(data=df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
def test_add_with_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], name="foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6], name="bar")
self.assertEquals(name, "bar")
def test_add_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3])
self.assertEquals(name, "Series 0")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_add_with_and_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_remove_exists(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
assert name
ds.remove("foo")
self.assertEquals(ds.column_names, [])
def test_remove_exists2(self):
with warnings.catch_warnings(record=True) as w:
ds = ColumnDataSource()
ds.remove("foo")
self.assertEquals(ds.column_names, [])
self.assertEquals(len(w), 1)
self.assertEquals(w[0].category, UserWarning)
self.assertEquals(str(w[0].message), "Unable to find column 'foo' in data source")
class TestServerDataSources(unittest.TestCase):
def test_basic(self):
ds = ServerDataSource()
self.assertTrue(isinstance(ds, DataSource))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
DavidBreuer/CytoSeg | CytoSeg/extraction.py | 1 | 9141 | ################################################################################
# Module: extraction.py
# Description: Test imports and network extraction
# License: GPL3, see full license in LICENSE.txt
# Web: https://github.com/DavidBreuer/CytoSeg
################################################################################
#%%############################################################################# imports
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import os
import pandas as pd
import random
import scipy as sp
import skimage
import skimage.io
import skimage.filters
import sys
import utils
#%%############################################################################# parameters
sigma=2.0 # tubeness filter width
block=101.0 # adaptive median filter block size
small=25.0 # smallest component size threshold
factr=0.5 # fraction of average intensity threshold
randw=0 # randomization method (0 = shuffle edge weights only / 1 = shuffle nodes and edges)
randn=20 # number of randomized networks
depth=7.75 # spacing between z-slices in xy-pixels spacings (1mum / 0.129mum/pixel = 7.75 pixels)
path='' # directory with actin and Golgi images
aa='actin_filter.tif' # name of actin image
gg='golgi_filter.tif' # name of Golgi image
#%%############################################################################# extract and randomize networks
imO=skimage.io.imread(path+aa,plugin='tifffile') # open actin image
I=len(imO) # get number of frames
Z=1 # set number of z-slices
shape=imO.shape
if(len(shape)>3):
Z=shape[3]
imT=skimage.io.imread(path+gg,plugin='tifffile') # open Golgi image
track=utils.xmlread(path+'track.xml') # read Golgi tracking results
T=len(track) # get number of tracks
mask=skimage.io.imread(path+'mask.tif',plugin='tifffile')>0 # open mask
R=randn # rename number of randomized networks
#%%#
dataB=[] # empty list for extracted networks and computed properties
dataP=[]
dataR=[]
for i in range(I): # for each frame...
print('extract',i,I,'segment')
imI=imO[i] # get actin image
imI=utils.im2d3d(imI) # if 2D image convert to 3D
imG=skimage.filters.gaussian(imI,sigma) # apply Gaussian filter
imR,imA=utils.skeletonize_graph(imG,mask,sigma,block,small,factr) # filter and skeletonize actin image
imE=utils.node_graph(imA>0,imG) # detect filaments and network nodes
print('extract',i,I,'graph')
gBo,pos=utils.make_graph(imE,imG) # construct graph from filament and node image
gBu=utils.unify_graph(gBo) # project multigraph to simple graph
gBc=utils.connect_graph(gBu,pos,imG) # connect disconnected components of graph
gBx=utils.centralize_graph(gBc) # compute edge centrality measures
gBn=utils.normalize_graph(gBx) # normalize total edge capacity to one
quant=utils.compute_graph(gBn,pos,mask) # compute graph properties
dataB.append([i,gBn,pos,quant]) # append data
for r in range(R):
print('extract',i,I,'randomize',r,R)
gRo,poz=utils.randomize_graph(gBu,pos,mask,planar=1,weights=randw) # randomize biological network
gRu=utils.unify_graph(gRo) # project multigraph to simple graph
gRc=utils.connect_graph(gRu,poz,imG) # connect disconnected components of graph
gRx=utils.centralize_graph(gRc) # compute edge centrality measures
gRn=utils.normalize_graph(gRx) # normalize total edge capacity to one
quant=utils.compute_graph(gRn,poz,mask) # compute graph properties
dataR.append([i,gRn,poz,quant]) # append data
#%%############################################################################# plot and export data
print('export','plot')
i=0 # choose time point for plotting
r=0 # choose randomized network for plotting
gB,pB=dataB[i*1+0][1],dataB[i*1+0][2] # get data for biological and randomized network
gR,pR=dataR[i*R+r][1],dataR[i*R+r][2]
plt.clf()
gs=mpl.gridspec.GridSpec(1,3,width_ratios=[1,1,1],height_ratios=[1],left=0.01,bottom=0.01,right=0.99,top=0.99,wspace=0.1,hspace=0.1)
aspect=2.0
alpha=1.0
lw=1.5
wh=np.array(np.where(mask))[::-1]
axis=np.hstack(zip(np.nanmin(wh,1),np.nanmax(wh,1)))
plt.subplot(gs[0]) # plot actin image and extracted biological network
plt.title('biological\nactin network')
plt.imshow(imO[i],cmap='Greys',interpolation='nearest',aspect=aspect)
ec=1.0*np.array([d['capa'] for u,v,d in gB.edges(data=True)])
nx.draw_networkx_edges(gB,pB[:,:2],edge_color=plt.cm.jet(ec/ec.max()),width=lw,alpha=alpha)
plt.axis(axis)
plt.axis('off')
plt.subplot(gs[1]) # plot actin image and randomized network
plt.title('randomized\nactin network')
plt.imshow(imO[i],cmap='Greys',interpolation='nearest',aspect=aspect)
ec=1.0*np.array([d['capa'] for u,v,d in gR.edges(data=True)])
nx.draw_networkx_edges(gR,pR[:,:2],edge_color=plt.cm.jet(ec/ec.max()),width=lw,alpha=alpha)
plt.axis(axis)
plt.axis('off')
plt.subplot(gs[2]) # plot Golgi image and tracks
plt.title('Golgi tracks')
plt.imshow(imT[i],cmap='Greys',interpolation='nearest',aspect=aspect)
for ti,t in enumerate(track[::-1]):
plt.plot(t[:,1],t[:,2],color=plt.cm.jet(1.0*ti/T),lw=lw,alpha=0.5)
plt.axis(axis)
plt.axis('off')
plt.savefig(path+'out_plot.pdf')
#%%#
print('export','track')
idt=np.hstack([np.repeat(ti,len(t)) for ti,t in enumerate(track)]) # convert Golgi tracks to list
tracka=np.vstack([idt,np.vstack(track).T]).T
columns=['ID','t0','x0','y0','z0','avg.intensity0','tot.intensity0','quality0','diameter0','t1','x1','y1','z1','avg.intensity1','tot.intensity1','quality1','diameter1'] # name of recorded Golgi features
df=pd.DataFrame(tracka,columns=columns) # save Golgi tracks as list
df.to_csv(path+'out_track.csv',sep=';',encoding='utf-8')
#%%#
print('export','graph')
nx.write_gml(gBn,path+'out_graph.gml') # save examplary actin network as graph
#%%#
print('export','data')
quants=['time','# nodes','# edges','# connected components','avg. edge capacity','assortativity','avg. path length','CV path length','algebraic connectivity','CV edge angles','crossing number'] # list of computed network properties
quanta=np.array([np.hstack([d[0],d[-1]]) for d in dataB]) # save properties of biological networks
df=pd.DataFrame(quanta,columns=quants)
df.to_csv(path+'out_biol.csv',sep=';',encoding='utf-8')
quanta=np.array([np.hstack([d[0],d[-1]]) for d in dataR]) # save properties of randomized networks
df=pd.DataFrame(quanta,columns=quants)
df.to_csv(path+'out_rand.csv',sep=';',encoding='utf-8')
| gpl-3.0 |
RealTimeWeb/datasets | preprocess/video_games/download_hltb.py | 1 | 8665 | '''
Download CSV file at https://researchportal.port.ac.uk/portal/en/datasets/video-games-dataset(d4fe28cd-1e44-4d2f-9db6-85b347bf761e).html
Rename to "games.csv"
'''
import requests
import requests_cache
import pandas as pd
import bs4
import json
from difflib import SequenceMatcher
from unidecode import unidecode
from pprint import pprint
from tqdm import tqdm
requests_cache.install_cache('hltb.db', allowable_methods=('GET', 'POST'))
manual_corrections = {
"Lumines: Puzzle Fusion": "Lumines",
"Metal Gear Ac!d": "Metal Gear Acid",
'Metal Gear Ac!d 2': 'Metal Gear Acid 2',
"Mr. DRILLER: Drill Spirits": "Mr. Driller Drill Spirits",
"Armored Core Formula Front - Extreme Battle": "Armored Core: Formula Front Extreme Battle",
"Brain Age\xfd: More Training in Minutes a Day!": "Brain Age 2: More Training in Minutes a Day!",
"SOCOM U.S. Navy SEALs - Fireteam Bravo": "SOCOM: U.S. Navy SEALs Fireteam Bravo",
"Peter Jackson's King Kong: The Official Game of...": "Peter Jackson's King Kong",
"Star Wars: Episode III - Revenge of the Sith": "Star Wars Episode III Revenge of the Sith",
'The Chronicles of Narnia: The Lion, the Witch a...': 'The Chronicles of Narnia: The Lion, the Witch and the Wardrobe',
'Mega Man Battle Network 5: Double Team DS': 'Mega Man Battle Network 5',
'Viva Pi\xa4ata': 'Viva Pinata',
'F.E.A.R.: First Encounter Assault Recon': 'FEAR',
'The Lord of the Rings: The Battle for Middle Ea...': 'The Lord of the Rings: The Battle for Middle-earth',
'Final Fantasy XI Online': 'Final Fantasy XI',
'Project Sylpheed: Arc of Deception': 'Project Sylpheed',
'007: From Russia with Love': 'From Russia with Love',
"Ultimate Ghosts 'N' Goblins": "Ultimate Ghosts 'n Goblins",
'Mega Man Star Force 2: Zerker X Ninja': 'Mega Man Star Force 2',
'Pok\x82mon: Platinum Version': 'Pokemon Platinum',
'Guilty Gear XX ? Core': 'Guilty Gear XX Accent Core',
'Lara Croft Tomb Raider: Legend': 'Tomb raider legend',
'Ben 10: Protector of the Earth': 'Ben 10: Protector of Earth',
'Mega Man Star Force: Dragon': 'Mega Man Star Force',
'Unreal Tournament III': 'Unreal Tournament 3',
"Disney Pirates of the Caribbean: At World's End": "Pirates of the Caribbean: At World's End",
'Silent Hill: 0rigins': 'Silent Hill: Origins',
'BWii: Battalion Wars 2': 'Battalion Wars 2',
'Godzilla Unleashed: Double Smash': 'Godzilla Unleashed',
'?kami': 'Okami',
'Jak & Daxter: The Lost Frontier': 'Jak and Daxter: The Lost Frontier',
'Watchmen: The End is Nigh: Parts 1 and 2': 'Watchmen: The End Is Nigh - Complete'
}
skip_list = {
'Ping Pals', 'Namco Museum Battle Collection',
'NBA Street Showdown', 'Midway Arcade Treasures: Extended Play',
'World Championship Poker 2 featuring Howard Led...',
'FIFA Soccer 06', 'FIFA Soccer 07',
'FIFA World Cup: Germany 2006',
'World Tour Soccer', 'Snood 2: On Vacation',
'Frantix', 'The Hustle: Detroit Streets',
'Smart Bomb', 'Elf Bowling 1&2',
'Rainbow Islands Revolution', 'Kao Challengers', 'Disney/Pixar Cars',
'MX vs. ATV: On the Edge', 'GT Pro Series',
'Brain Boost: Beta Wave',
'Brain Boost: Gamma Wave',
'World Series of Poker: Tournament of Champions',
'Tamagotchi Connexion: Corner Shop',
'NBA Ballers: Rebound',
"Charlotte's Web",
'Finding Nemo: Escape to the Big Blue',
'MLB',
'Trace Memory',
'NBA',
'Freedom Wings',
'Dawn of Discovery'
}
def parse_time(a_time):
if a_time.strip() == '--':
return 0
if '-' in a_time:
first, second = a_time.split('-')
first, second = parse_time(first), parse_time(second)
return (first+second)/2
a_time = a_time.replace('\xbd', '.5')
if 'Mins' in a_time:
a_time = a_time.replace('Mins', '')
a_time = float(a_time.strip())/60
return a_time
elif 'Hours' in a_time:
a_time = a_time.replace('Hours', '')
a_time = float(a_time.strip())
return a_time
else:
raise Exception(a_time)
def parse_hm(a_time):
a_time = a_time.strip()
if a_time == '--':
return 0.0
elif ' ' in a_time:
hh, mm = a_time.split(' ')
hh, mm = hh[:-1], mm[:-1]
return float(hh) + float(mm)/60
elif 'h' in a_time:
return float(a_time[:-1])
elif 'm' in a_time:
return float(a_time[:-1])/60
def parse_poll(a_poll):
a_poll = a_poll.strip()
if 'K' in a_poll:
return int(float(a_poll[:-1]) * 1000)
else:
return int(a_poll)
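# Hedged sanity checks for the three parsers above; the literal strings are
# assumptions about HowLongToBeat's formatting, not values scraped here.
assert parse_time('30 Mins') == 0.5
assert parse_time('2\xbd Hours') == 2.5
assert parse_time('--') == 0
assert parse_hm('1h 30m') == 1.5
assert parse_hm('45m') == 0.75
assert parse_poll('2K') == 2000
assert parse_poll('85') == 85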
def reverse_one_hot(row, columns, transform=None):
if transform is None:
transform = lambda x: x
for c in columns:
if row[c] == 1:
return transform(c)
def get_true_columns(row, columns, transform=None):
if transform is None:
transform = lambda x: x
return [transform(c) for c in columns if row[c] == 1]
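# Toy example of the two one-hot helpers above, mirroring the
# RatingE/RatingT/RatingM columns used when building the output below.
_demo_row = {'RatingE': 0, 'RatingT': 1, 'RatingM': 0}
assert reverse_one_hot(_demo_row, ['RatingE', 'RatingT', 'RatingM'], lambda c: c[-1]) == 'T'
assert get_true_columns(_demo_row, ['RatingE', 'RatingT', 'RatingM']) == ['RatingT']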
DEFAULT_TIMES = {
'Polled': 0,
'Average': 0,
'Median': 0,
'Rushed': 0,
'Leisure': 0
}
GAME_TIME_COLUMNS = ['Average', 'Median', 'Rushed', 'Leisure']
PUBLISHER_COLUMNS = '2K Acclaim Activision Atari Capcom Disney Eidos EA Infograme Konami Microsoft Midway Namco Nintendo Rockstar Sony Sega THQ SquareEnix Ubisoft'.split()
df = pd.read_csv('games.csv', encoding = 'latin1')
df = df[~df['Title'].isin(skip_list)]
result = []
for index, data in tqdm(df.iterrows()):
name = data['Title']
original_name = name
if name.endswith('...'):
name = name.rsplit(' ', maxsplit=1)[0]
if name in manual_corrections:
name = manual_corrections[name]
name = (name.replace(':', '')
.replace(' - ', ' '))
name = (name.replace('\xa4', 'n')
.replace('\x82', 'e')
.replace('\x8b', 'i')
.replace('\x81', 'u')
)
SEARCH_URL = 'https://howlongtobeat.com/search_main.php?page=1'
search_results = requests.post(SEARCH_URL, data={
'queryString': name,
't': 'games',
'sorthead': 'popular',
'sortd':'Normal Order'
}).content
soup = bs4.BeautifulSoup(search_results, 'lxml')
top_anchor = soup.find('a')
if top_anchor is None:
#print("MISSING:", original_name.encode('utf-8'), "||", name.encode('utf-8'))
continue
title, url = top_anchor['title'], top_anchor['href']
match = SequenceMatcher(None, name, title).ratio()
if match < .5:
#print("MISMATCH?", name, "||", title)
pass
full_url = 'https://howlongtobeat.com/' + url
game_page = requests.get(full_url).content
soup = bs4.BeautifulSoup(game_page, 'lxml')
game_table = soup.select('h3.back_blue + div table.game_main_table tbody tr')
game_times = {'Main Story': DEFAULT_TIMES.copy(),
'Main + Extras': DEFAULT_TIMES.copy(),
'Completionists': DEFAULT_TIMES.copy(),
'All PlayStyles': DEFAULT_TIMES.copy()}
for row in game_table:
tds = [td.text for td in row.select("td")]
PlayStyle = tds[0]
game_times[PlayStyle]['Polled'] = parse_poll(tds[1])
for name, value in zip(GAME_TIME_COLUMNS, tds[2:]):
game_times[PlayStyle][name] = parse_hm(value)
if 1 in (data['Accessory'], data['LtdEdition']):
continue
result.append({
'Length': game_times,
'Release': {
'Year': int(data['YearReleased']),
'Console': data['Console'],
'Re-release?': bool(data['Re-release']),
'Rating': reverse_one_hot(data, ['RatingE','RatingT','RatingM'], lambda x: x[-1]),
},
'Title': unidecode(data['Title']),
'Metadata': {
'Publishers': get_true_columns(data, PUBLISHER_COLUMNS),
'Genres': [x.strip()
for x in data['Genre'].split(",")],
'Licensed?': bool(data['Licensed']),
'Sequel?': bool(data['Sequel']),
},
'Metrics': {
'Sales': float(data['US Sales (millions)']),
'Used Price': float(data['Usedprice']),
'Review Score': int(data['Review Score']),
},
'Features': {
'Max Players': int(data['MaxPlayers']),
'Online?': bool(data['Online']),
'Handheld?': bool(data['Handheld']),
'Multiplatform?': bool(data['Multiplatform']),
}
})
with open('video_games.json', 'w') as out:
json.dump(result, out, indent=2) | gpl-2.0 |
hsuantien/scikit-learn | examples/classification/plot_lda_qda.py | 164 | 4806 | """
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
| bsd-3-clause |
sravan-s/zeppelin | spark/src/main/resources/python/zeppelin_pyspark.py | 16 | 12106 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, getopt, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
import ast
import warnings
# for back compatibility
from pyspark.sql import SQLContext, HiveContext, Row
class Logger(object):
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PyZeppelinContext(dict):
def __init__(self, zc):
self.z = zc
self._displayhook = lambda *args: None
def show(self, obj):
from pyspark.sql import DataFrame
if isinstance(obj, DataFrame):
print(self.z.showData(obj._jdf))
else:
print(str(obj))
# By implementing special methods it makes operating on it more Pythonic
def __setitem__(self, key, item):
self.z.put(key, item)
def __getitem__(self, key):
return self.z.get(key)
def __delitem__(self, key):
self.z.remove(key)
def __contains__(self, item):
return self.z.containsKey(item)
def add(self, key, value):
self.__setitem__(key, value)
def put(self, key, value):
self.__setitem__(key, value)
def get(self, key):
return self.__getitem__(key)
def getInterpreterContext(self):
return self.z.getInterpreterContext()
def input(self, name, defaultValue=""):
return self.z.input(name, defaultValue)
def select(self, name, options, defaultValue=""):
# auto_convert to ArrayList doesn't match the method signature on JVM side
tuples = list(map(lambda items: self.__tupleToScalaTuple2(items), options))
iterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(tuples)
return self.z.select(name, defaultValue, iterables)
def checkbox(self, name, options, defaultChecked=None):
if defaultChecked is None:
defaultChecked = []
optionTuples = list(map(lambda items: self.__tupleToScalaTuple2(items), options))
optionIterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(optionTuples)
defaultCheckedIterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(defaultChecked)
checkedItems = gateway.jvm.scala.collection.JavaConversions.seqAsJavaList(self.z.checkbox(name, defaultCheckedIterables, optionIterables))
result = []
for checkedItem in checkedItems:
result.append(checkedItem)
return result
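# Illustrative notebook usage of the dynamic-form helpers above (run inside a
# %pyspark paragraph where `z` is this context object); options are given as
# (value, display-label) tuples:
#   choice = z.select("fruit", [("1", "apple"), ("2", "banana")], "1")
#   picked = z.checkbox("days", [("mon", "Mon"), ("tue", "Tue")], ["mon"])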
def registerHook(self, event, cmd, replName=None):
if replName is None:
self.z.registerHook(event, cmd)
else:
self.z.registerHook(event, cmd, replName)
def unregisterHook(self, event, replName=None):
if replName is None:
self.z.unregisterHook(event)
else:
self.z.unregisterHook(event, replName)
def getHook(self, event, replName=None):
if replName is None:
return self.z.getHook(event)
return self.z.getHook(event, replName)
def _setup_matplotlib(self):
# If we don't have matplotlib installed don't bother continuing
try:
import matplotlib
except ImportError:
return
# Make sure custom backends are available in the PYTHONPATH
rootdir = os.environ.get('ZEPPELIN_HOME', os.getcwd())
mpl_path = os.path.join(rootdir, 'interpreter', 'lib', 'python')
if mpl_path not in sys.path:
sys.path.append(mpl_path)
# Finally check if backend exists, and if so configure as appropriate
try:
matplotlib.use('module://backend_zinline')
import backend_zinline
# Everything looks good so make config assuming that we are using
# an inline backend
self._displayhook = backend_zinline.displayhook
self.configure_mpl(width=600, height=400, dpi=72, fontsize=10,
interactive=True, format='png', context=self.z)
except ImportError:
# Fall back to Agg if no custom backend installed
matplotlib.use('Agg')
warnings.warn("Unable to load inline matplotlib backend, "
"falling back to Agg")
def configure_mpl(self, **kwargs):
import mpl_config
mpl_config.configure(**kwargs)
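# For example (illustrative), a paragraph can override the inline-plot
# defaults set in _setup_matplotlib with:
#   z.configure_mpl(width=800, height=500, fontsize=12)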
def __tupleToScalaTuple2(self, tuple):
if (len(tuple) == 2):
return gateway.jvm.scala.Tuple2(tuple[0], tuple[1])
else:
raise IndexError("options must be a list of tuple of 2")
class SparkVersion(object):
SPARK_1_4_0 = 10400
SPARK_1_3_0 = 10300
SPARK_2_0_0 = 20000
def __init__(self, versionNumber):
self.version = versionNumber
def isAutoConvertEnabled(self):
return self.version >= self.SPARK_1_4_0
def isImportAllPackageUnderSparkSql(self):
return self.version >= self.SPARK_1_3_0
def isSpark2(self):
return self.version >= self.SPARK_2_0_0
class PySparkCompletion:
def __init__(self, interpreterObject):
self.interpreterObject = interpreterObject
def getGlobalCompletion(self):
objectDefList = []
try:
for completionItem in list(globals().keys()):
objectDefList.append(completionItem)
except:
return None
else:
return objectDefList
def getMethodCompletion(self, text_value):
execResult = locals()
if text_value is None:
return None
completion_target = text_value
try:
if len(completion_target) <= 0:
return None
if text_value[-1] == ".":
completion_target = text_value[:-1]
exec("{} = dir({})".format("objectDefList", completion_target), globals(), execResult)
except:
return None
else:
return list(execResult['objectDefList'])
def getCompletion(self, text_value):
completionList = set()
globalCompletionList = self.getGlobalCompletion()
if globalCompletionList is not None:
for completionItem in list(globalCompletionList):
completionList.add(completionItem)
if text_value is not None:
objectCompletionList = self.getMethodCompletion(text_value)
if objectCompletionList is not None:
for completionItem in list(objectCompletionList):
completionList.add(completionItem)
if len(completionList) <= 0:
self.interpreterObject.setStatementsFinished("", False)
else:
result = json.dumps(list(filter(lambda x : not re.match("^__.*", x), list(completionList))))
self.interpreterObject.setStatementsFinished(result, False)
client = GatewayClient(port=int(sys.argv[1]))
sparkVersion = SparkVersion(int(sys.argv[2]))
if sparkVersion.isSpark2():
from pyspark.sql import SparkSession
else:
from pyspark.sql import SchemaRDD
if sparkVersion.isAutoConvertEnabled():
gateway = JavaGateway(client, auto_convert = True)
else:
gateway = JavaGateway(client)
java_import(gateway.jvm, "org.apache.spark.SparkEnv")
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
intp = gateway.entry_point
output = Logger()
sys.stdout = output
sys.stderr = output
intp.onPythonScriptInitialized(os.getpid())
jsc = intp.getJavaSparkContext()
if sparkVersion.isImportAllPackageUnderSparkSql():
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
else:
java_import(gateway.jvm, "org.apache.spark.sql.SQLContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.HiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.LocalHiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.TestHiveContext")
java_import(gateway.jvm, "scala.Tuple2")
_zcUserQueryNameSpace = {}
jconf = intp.getSparkConf()
conf = SparkConf(_jvm = gateway.jvm, _jconf = jconf)
sc = _zsc_ = SparkContext(jsc=jsc, gateway=gateway, conf=conf)
_zcUserQueryNameSpace["_zsc_"] = _zsc_
_zcUserQueryNameSpace["sc"] = sc
if sparkVersion.isSpark2():
spark = __zSpark__ = SparkSession(sc, intp.getSparkSession())
sqlc = __zSqlc__ = __zSpark__._wrapped
_zcUserQueryNameSpace["sqlc"] = sqlc
_zcUserQueryNameSpace["__zSqlc__"] = __zSqlc__
_zcUserQueryNameSpace["spark"] = spark
_zcUserQueryNameSpace["__zSpark__"] = __zSpark__
else:
sqlc = __zSqlc__ = SQLContext(sparkContext=sc, sqlContext=intp.getSQLContext())
_zcUserQueryNameSpace["sqlc"] = sqlc
_zcUserQueryNameSpace["__zSqlc__"] = sqlc
sqlContext = __zSqlc__
_zcUserQueryNameSpace["sqlContext"] = sqlContext
completion = __zeppelin_completion__ = PySparkCompletion(intp)
_zcUserQueryNameSpace["completion"] = completion
_zcUserQueryNameSpace["__zeppelin_completion__"] = __zeppelin_completion__
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext())
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["z"] = z
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
while True :
req = intp.getStatements()
try:
stmts = req.statements().split("\n")
jobGroup = req.jobGroup()
jobDesc = req.jobDescription()
# Get post-execute hooks
try:
global_hook = intp.getHook('post_exec_dev')
except:
global_hook = None
try:
user_hook = __zeppelin__.getHook('post_exec')
except:
user_hook = None
nhooks = 0
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
if stmts:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
sc.setJobGroup(jobGroup, jobDesc)
code = compile('\n'.join(stmts), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
[code.body[-(nhooks + 1)]])
try:
for node in to_run_exec:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
for node in to_run_single:
mod = ast.Interactive([node])
code = compile(mod, '<stdin>', 'single')
exec(code, _zcUserQueryNameSpace)
for node in to_run_hooks:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
intp.setStatementsFinished("", False)
except Py4JJavaError:
# raise it to outside try except
raise
except:
exception = traceback.format_exc()
m = re.search("File \"<stdin>\", line (\d+).*", exception)
if m:
line_no = int(m.group(1))
intp.setStatementsFinished(
"Fail to execute line {}: {}\n".format(line_no, stmts[line_no - 1]) + exception, True)
else:
intp.setStatementsFinished(exception, True)
else:
intp.setStatementsFinished("", False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
| apache-2.0 |
hainm/statsmodels | statsmodels/genmod/cov_struct.py | 19 | 46892 | from statsmodels.compat.python import iterkeys, itervalues, zip, range
from statsmodels.stats.correlation_tools import cov_nearest
import numpy as np
import pandas as pd
from scipy import linalg as spl
from collections import defaultdict
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
IterationLimitWarning)
import warnings
"""
Some details for the covariance calculations can be found in the Stata
docs:
http://www.stata.com/manuals13/xtxtgee.pdf
"""
class CovStruct(object):
"""
A base class for correlation and covariance structures of grouped
data.
Each implementation of this class takes the residuals from a
regression model that has been fitted to grouped data, and uses
them to estimate the within-group dependence structure of the
random errors in the model.
The state of the covariance structure is represented through the
value of the class variable `dep_params`. The default state of a
newly-created instance should correspond to the identity
correlation matrix.
"""
def __init__(self, cov_nearest_method="clipped"):
# Parameters describing the dependency structure
self.dep_params = None
# Keep track of the number of times that the covariance was
# adjusted.
self.cov_adjust = []
# Method for projecting the covariance matrix if it not SPD.
self.cov_nearest_method = cov_nearest_method
def initialize(self, model):
"""
Called by GEE, used by implementations that need additional
setup prior to running `fit`.
Parameters
----------
model : GEE class
A reference to the parent GEE class instance.
"""
self.model = model
def update(self, params):
"""
Updates the association parameter values based on the current
regression coefficients.
Parameters
----------
params : array-like
Working values for the regression parameters.
"""
raise NotImplementedError
def covariance_matrix(self, endog_expval, index):
"""
Returns the working covariance or correlation matrix for a
given cluster of data.
Parameters
----------
endog_expval: array-like
The expected values of endog for the cluster for which the
covariance or correlation matrix will be returned
index: integer
The index of the cluster for which the covariane or
correlation matrix will be returned
Returns
-------
M: matrix
The covariance or correlation matrix of endog
is_cor: bool
True if M is a correlation matrix, False if M is a
covariance matrix
"""
raise NotImplementedError
def covariance_matrix_solve(self, expval, index, stdev, rhs):
"""
Solves matrix equations of the form `covmat * soln = rhs` and
returns the values of `soln`, where `covmat` is the covariance
matrix represented by this class.
Parameters
----------
expval: array-like
The expected value of endog for each observed value in the
group.
index: integer
The group index.
stdev : array-like
The standard deviation of endog for each observation in
the group.
rhs : list/tuple of array-like
A set of right-hand sides; each defines a matrix equation
to be solved.
Returns
-------
soln : list/tuple of array-like
The solutions to the matrix equations.
Notes
-----
Returns None if the solver fails.
Some dependence structures do not use `expval` and/or `index`
to determine the correlation matrix. Some families
(e.g. binomial) do not use the `stdev` parameter when forming
the covariance matrix.
If the covariance matrix is singular or not SPD, it is
projected to the nearest such matrix. These projection events
are recorded in the fit_history member of the GEE model.
Systems of linear equations with the covariance matrix as the
left hand side (LHS) are solved for different right hand sides
(RHS); the LHS is only factorized once to save time.
This is a default implementation, it can be reimplemented in
subclasses to optimize the linear algebra according to the
structure of the covariance matrix.
"""
vmat, is_cor = self.covariance_matrix(expval, index)
if is_cor:
vmat *= np.outer(stdev, stdev)
# Factor the covariance matrix. If the factorization fails,
# attempt to condition it into a factorizable matrix.
threshold = 1e-2
success = False
cov_adjust = 0
for itr in range(20):
try:
vco = spl.cho_factor(vmat)
success = True
break
except np.linalg.LinAlgError:
vmat = cov_nearest(vmat, method=self.cov_nearest_method,
threshold=threshold)
threshold *= 2
cov_adjust += 1
self.cov_adjust.append(cov_adjust)
# Last resort if we still can't factor the covariance matrix.
if not success:
warnings.warn("Unable to condition covariance matrix to an SPD matrix using cov_nearest",
ConvergenceWarning)
vmat = np.diag(np.diag(vmat))
vco = spl.cho_factor(vmat)
soln = [spl.cho_solve(vco, x) for x in rhs]
return soln
def summary(self):
"""
Returns a text summary of the current estimate of the
dependence structure.
"""
raise NotImplementedError
class Independence(CovStruct):
"""
An independence working dependence structure.
"""
# Nothing to update
def update(self, params):
return
def covariance_matrix(self, expval, index):
dim = len(expval)
return np.eye(dim, dtype=np.float64), True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
v = stdev**2
rslt = []
for x in rhs:
if x.ndim == 1:
rslt.append(x / v)
else:
rslt.append(x / v[:, None])
return rslt
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return "Observations within a cluster are modeled as being independent."
class Exchangeable(CovStruct):
"""
An exchangeable working dependence structure.
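Examples
--------
A minimal, illustrative sketch; the data are simulated here only to
make the snippet self-contained, and the `statsmodels.api` accessors
(`sm.GEE`, `sm.cov_struct`, `sm.families`) are the usual public entry
points rather than anything defined in this module:

>>> import numpy as np
>>> import statsmodels.api as sm
>>> rng = np.random.RandomState(0)
>>> groups = np.repeat(np.arange(50), 4)  # 50 clusters of size 4
>>> exog = sm.add_constant(rng.normal(size=(200, 2)))
>>> endog = exog.sum(1) + rng.normal(size=200)
>>> cov = sm.cov_struct.Exchangeable()
>>> model = sm.GEE(endog, exog, groups=groups,
...                family=sm.families.Gaussian(), cov_struct=cov)
>>> result = model.fit()
>>> print(cov.summary())  # doctest: +SKIP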
"""
def __init__(self):
super(Exchangeable, self).__init__()
# The correlation between any two values in the same cluster
self.dep_params = 0.
def update(self, params):
endog = self.model.endog_li
nobs = self.model.nobs
varfunc = self.model.family.variance
cached_means = self.model.cached_means
has_weights = self.model.weights is not None
weights_li = self.model.weights
residsq_sum, scale = 0, 0
fsum1, fsum2, n_pairs = 0., 0., 0.
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
f = weights_li[i] if has_weights else 1.
ngrp = len(resid)
residsq = np.outer(resid, resid)
scale += f * np.trace(residsq)
fsum1 += f * len(endog[i])
residsq = np.tril(residsq, -1)
residsq_sum += f * residsq.sum()
npr = 0.5 * ngrp * (ngrp - 1)
fsum2 += f * npr
n_pairs += npr
ddof = self.model.ddof_scale
scale /= (fsum1 * (nobs - ddof) / float(nobs))
residsq_sum /= scale
self.dep_params = residsq_sum / (fsum2 * (n_pairs - ddof) / float(n_pairs))
def covariance_matrix(self, expval, index):
dim = len(expval)
dp = self.dep_params * np.ones((dim, dim), dtype=np.float64)
np.fill_diagonal(dp, 1)
return dp, True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
k = len(expval)
c = self.dep_params / (1. - self.dep_params)
c /= 1. + self.dep_params * (k - 1)
rslt = []
for x in rhs:
if x.ndim == 1:
x1 = x / stdev
y = x1 / (1. - self.dep_params)
y -= c * sum(x1)
y /= stdev
else:
x1 = x / stdev[:, None]
y = x1 / (1. - self.dep_params)
y -= c * x1.sum(0)
y /= stdev[:, None]
rslt.append(y)
return rslt
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return ("The correlation between two observations in the " +
"same cluster is %.3f" % self.dep_params)
class Nested(CovStruct):
"""
A nested working dependence structure.
A working dependence structure that captures a nested hierarchy of
groups, each level of which contributes to the random error term
of the model.
When using this working covariance structure, `dep_data` of the
GEE instance should contain a n_obs x k matrix of 0/1 indicators,
corresponding to the k subgroups nested under the top-level
`groups` of the GEE instance. These subgroups should be nested
from left to right, so that two observations with the same value
for column j of `dep_data` should also have the same value for all
columns j' < j (this only applies to observations in the same
top-level cluster given by the `groups` argument to GEE).
Examples
--------
Suppose our data are student test scores, and the students are in
classrooms, nested in schools, nested in school districts. The
school district is the highest level of grouping, so the school
district id would be provided to GEE as `groups`, and the school
and classroom id's would be provided to the Nested class as the
`dep_data` argument, e.g.
0 0 # School 0, classroom 0, student 0
0 0 # School 0, classroom 0, student 1
0 1 # School 0, classroom 1, student 0
0 1 # School 0, classroom 1, student 1
1 0 # School 1, classroom 0, student 0
1 0 # School 1, classroom 0, student 1
1 1 # School 1, classroom 1, student 0
1 1 # School 1, classroom 1, student 1
Labels lower in the hierarchy are recycled, so that student 0 in
classroom 0 is different from student 0 in classroom 1, etc.
Notes
-----
The calculations for this dependence structure involve all pairs
of observations within a group (that is, within the top level
`group` structure passed to GEE). Large group sizes will result
in slow iterations.
The variance components are estimated using least squares
regression of the products r*r', for standardized residuals r and
r' in the same group, on a vector of indicators defining which
variance components are shared by r and r'.
"""
def initialize(self, model):
"""
Called on the first call to update
`ilabels` is a list of n_i x n_i matrices containing integer
labels that correspond to specific correlation parameters.
Two elements of ilabels[i] with the same label share identical
variance components.
`designx` is a matrix, with each row containing dummy
variables indicating which variance components are associated
with the corresponding element of QY.
"""
super(Nested, self).initialize(model)
if self.model.weights is not None:
warnings.warn("weights not implemented for nested cov_struct, using unweighted covariance estimate")
# A bit of processing of the nest data
id_matrix = np.asarray(self.model.dep_data)
if id_matrix.ndim == 1:
id_matrix = id_matrix[:,None]
self.id_matrix = id_matrix
endog = self.model.endog_li
designx, ilabels = [], []
# The number of layers of nesting
n_nest = self.id_matrix.shape[1]
for i in range(self.model.num_group):
ngrp = len(endog[i])
glab = self.model.group_labels[i]
rix = self.model.group_indices[glab]
# Determine the number of common variance components
# shared by each pair of observations.
ix1, ix2 = np.tril_indices(ngrp, -1)
ncm = (self.id_matrix[rix[ix1], :] ==
self.id_matrix[rix[ix2], :]).sum(1)
# This is used to construct the working correlation
# matrix.
ilabel = np.zeros((ngrp, ngrp), dtype=np.int32)
ilabel[ix1, ix2] = ncm + 1
ilabel[ix2, ix1] = ncm + 1
ilabels.append(ilabel)
# This is used to estimate the variance components.
dsx = np.zeros((len(ix1), n_nest+1), dtype=np.float64)
dsx[:,0] = 1
for k in np.unique(ncm):
ii = np.flatnonzero(ncm == k)
dsx[ii, 1:k+1] = 1
designx.append(dsx)
self.designx = np.concatenate(designx, axis=0)
self.ilabels = ilabels
svd = np.linalg.svd(self.designx, 0)
self.designx_u = svd[0]
self.designx_s = svd[1]
self.designx_v = svd[2].T
def update(self, params):
endog = self.model.endog_li
nobs = self.model.nobs
dim = len(params)
if self.designx is None:
self._compute_design(self.model)
cached_means = self.model.cached_means
varfunc = self.model.family.variance
dvmat = []
scale = 0.
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
ix1, ix2 = np.tril_indices(len(resid), -1)
dvmat.append(resid[ix1] * resid[ix2])
scale += np.sum(resid**2)
dvmat = np.concatenate(dvmat)
scale /= (nobs - dim)
# Use least squares regression to estimate the variance
# components
vcomp_coeff = np.dot(self.designx_v, np.dot(self.designx_u.T,
dvmat) / self.designx_s)
self.vcomp_coeff = np.clip(vcomp_coeff, 0, np.inf)
self.scale = scale
self.dep_params = self.vcomp_coeff.copy()
def covariance_matrix(self, expval, index):
dim = len(expval)
# First iteration
if self.dep_params is None:
return np.eye(dim, dtype=np.float64), True
ilabel = self.ilabels[index]
c = np.r_[self.scale, np.cumsum(self.vcomp_coeff)]
vmat = c[ilabel]
vmat /= self.scale
return vmat, True
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
def summary(self):
"""
Returns a summary string describing the state of the
dependence structure.
"""
msg = "Variance estimates\n------------------\n"
for k in range(len(self.vcomp_coeff)):
msg += "Component %d: %.3f\n" % (k+1, self.vcomp_coeff[k])
msg += "Residual: %.3f\n" % (self.scale -
np.sum(self.vcomp_coeff))
return msg
class Stationary(CovStruct):
"""
A stationary covariance structure.
The correlation between two observations is an arbitrary function
of the distance between them. Distances up to a given maximum
value are included in the covariance model.
Parameters
----------
max_lag : float
The largest distance that is included in the covariance model.
grid : bool
If True, the index positions in the data (after dropping missing
values) are used to define distances, and the `time` variable is
ignored.
"""
def __init__(self, max_lag=1, grid=False):
super(Stationary, self).__init__()
self.max_lag = max_lag
self.grid = grid
self.dep_params = np.zeros(max_lag)
def initialize(self, model):
super(Stationary, self).initialize(model)
# Time used as an index needs to be integer type.
if not self.grid:
time = self.model.time[:, 0].astype(np.int32)
self.time = self.model.cluster_list(time)
def update(self, params):
if self.grid:
self.update_grid(params)
else:
self.update_nogrid(params)
def update_grid(self, params):
endog = self.model.endog_li
cached_means = self.model.cached_means
varfunc = self.model.family.variance
dep_params = np.zeros(self.max_lag + 1)
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
dep_params[0] += np.sum(resid * resid) / len(resid)
for j in range(1, self.max_lag + 1):
dep_params[j] += np.sum(resid[0:-j] * resid[j:]) / len(resid[j:])
self.dep_params = dep_params[1:] / dep_params[0]
def update_nogrid(self, params):
endog = self.model.endog_li
cached_means = self.model.cached_means
varfunc = self.model.family.variance
dep_params = np.zeros(self.max_lag + 1)
dn = np.zeros(self.max_lag + 1)
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
j1, j2 = np.tril_indices(len(expval))
dx = np.abs(self.time[i][j1] - self.time[i][j2])
ii = np.flatnonzero(dx <= self.max_lag)
j1 = j1[ii]
j2 = j2[ii]
dx = dx[ii]
vs = np.bincount(dx, weights=resid[j1] * resid[j2], minlength=self.max_lag+1)
vd = np.bincount(dx, minlength=self.max_lag+1)
ii = np.flatnonzero(vd > 0)
dn[ii] += 1
if len(ii) > 0:
dep_params[ii] += vs[ii] / vd[ii]
dep_params /= dn
self.dep_params = dep_params[1:] / dep_params[0]
def covariance_matrix(self, endog_expval, index):
if self.grid:
return self.covariance_matrix_grid(endog_expval, index)
j1, j2 = np.tril_indices(len(endog_expval))
dx = np.abs(self.time[index][j1] - self.time[index][j2])
ii = np.flatnonzero((0 < dx) & (dx <= self.max_lag))
j1 = j1[ii]
j2 = j2[ii]
dx = dx[ii]
cmat = np.eye(len(endog_expval))
cmat[j1, j2] = self.dep_params[dx - 1]
cmat[j2, j1] = self.dep_params[dx - 1]
return cmat, True
def covariance_matrix_grid(self, endog_expval, index):
from scipy.linalg import toeplitz
r = np.zeros(len(endog_expval))
r[0] = 1
r[1:self.max_lag + 1] = self.dep_params
return toeplitz(r), True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
if not self.grid:
return super(Stationary, self).covariance_matrix_solve(expval, index, stdev, rhs)
from statsmodels.tools.linalg import stationary_solve
r = np.zeros(len(expval))
r[0:self.max_lag] = self.dep_params
return [stationary_solve(r, x) for x in rhs]
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return ("Stationary dependence parameters\n%s" % (self.dep_params,))
class Autoregressive(CovStruct):
"""
A first-order autoregressive working dependence structure.
The dependence is defined in terms of the `time` component of the
parent GEE class, which defaults to the index position of each
value within its cluster, based on the order of values in the
input data set. Time represents a potentially multidimensional
index from which distances between pairs of observations can be
determined.
The correlation between two observations in the same cluster is
dep_params^distance, where `dep_params` contains the (scalar)
autocorrelation parameter to be estimated, and `distance` is the
distance between the two observations, calculated from their
corresponding time values. `time` is stored as an n_obs x k
matrix, where `k` represents the number of dimensions in the time
index.
The autocorrelation parameter is estimated using weighted
nonlinear least squares, regressing each value within a cluster on
each preceeding value in the same cluster.
Parameters
----------
dist_func: function from R^k x R^k to R^+, optional
A function that computes the distance between the two
observations based on their `time` values.
References
----------
B Rosner, A Munoz. Autoregressive modeling for the analysis of
longitudinal data with unequally spaced examinations. Statistics
in medicine. Vol 7, 59-71, 1988.
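Examples
--------
An illustrative sketch with an equally spaced time index; the data are
simulated only to make the snippet self-contained, and `sm` refers to
the usual `statsmodels.api` entry point:

>>> import numpy as np
>>> import statsmodels.api as sm
>>> rng = np.random.RandomState(0)
>>> groups = np.repeat(np.arange(40), 5)  # 40 clusters of size 5
>>> time = np.tile(np.arange(5), 40)      # within-cluster time index
>>> exog = sm.add_constant(rng.normal(size=(200, 1)))
>>> endog = exog.sum(1) + rng.normal(size=200)
>>> cov = sm.cov_struct.Autoregressive()
>>> model = sm.GEE(endog, exog, groups=groups, time=time,
...                cov_struct=cov)
>>> result = model.fit()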
"""
def __init__(self, dist_func=None):
super(Autoregressive, self).__init__()
# The function for determining distances based on time
if dist_func is None:
self.dist_func = lambda x, y: np.abs(x - y).sum()
else:
self.dist_func = dist_func
self.designx = None
# The autocorrelation parameter
self.dep_params = 0.
def update(self, params):
if self.model.weights is not None:
warnings.warn("weights not implemented for autoregressive cov_struct, using unweighted covariance estimate")
endog = self.model.endog_li
time = self.model.time_li
# Only need to compute this once
if self.designx is not None:
designx = self.designx
else:
designx = []
for i in range(self.model.num_group):
ngrp = len(endog[i])
if ngrp == 0:
continue
# Loop over pairs of observations within a cluster
for j1 in range(ngrp):
for j2 in range(j1):
designx.append(self.dist_func(time[i][j1, :],
time[i][j2, :]))
designx = np.array(designx)
self.designx = designx
scale = self.model.estimate_scale()
varfunc = self.model.family.variance
cached_means = self.model.cached_means
# Weights
var = 1. - self.dep_params**(2*designx)
var /= 1. - self.dep_params**2
wts = 1. / var
wts /= wts.sum()
residmat = []
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(scale * varfunc(expval))
resid = (endog[i] - expval) / stdev
ngrp = len(resid)
for j1 in range(ngrp):
for j2 in range(j1):
residmat.append([resid[j1], resid[j2]])
residmat = np.array(residmat)
# Need to minimize this
def fitfunc(a):
dif = residmat[:, 0] - (a**designx)*residmat[:, 1]
return np.dot(dif**2, wts)
# Left bracket point
b_lft, f_lft = 0., fitfunc(0.)
# Center bracket point
b_ctr, f_ctr = 0.5, fitfunc(0.5)
while f_ctr > f_lft:
b_ctr /= 2
f_ctr = fitfunc(b_ctr)
if b_ctr < 1e-8:
self.dep_params = 0
return
# Right bracket point
b_rgt, f_rgt = 0.75, fitfunc(0.75)
while f_rgt < f_ctr:
b_rgt = b_rgt + (1. - b_rgt) / 2
f_rgt = fitfunc(b_rgt)
if b_rgt > 1. - 1e-6:
raise ValueError(
"Autoregressive: unable to find right bracket")
from scipy.optimize import brent
self.dep_params = brent(fitfunc, brack=[b_lft, b_ctr, b_rgt])
def covariance_matrix(self, endog_expval, index):
ngrp = len(endog_expval)
if self.dep_params == 0:
return np.eye(ngrp, dtype=np.float64), True
idx = np.arange(ngrp)
cmat = self.dep_params**np.abs(idx[:, None] - idx[None, :])
return cmat, True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
# The inverse of an AR(1) covariance matrix is tri-diagonal.
k = len(expval)
soln = []
# LHS has 1 column
if k == 1:
return [x / stdev**2 for x in rhs]
# LHS has 2 columns
if k == 2:
mat = np.array([[1, -self.dep_params], [-self.dep_params, 1]])
mat /= (1. - self.dep_params**2)
for x in rhs:
if x.ndim == 1:
x1 = x / stdev
else:
x1 = x / stdev[:, None]
x1 = np.dot(mat, x1)
if x.ndim == 1:
x1 /= stdev
else:
x1 /= stdev[:, None]
soln.append(x1)
return soln
# LHS has >= 3 columns: values c0, c1, c2 defined below give
# the inverse. c0 is on the diagonal, except for the first
# and last position. c1 is on the first and last position of
# the diagonal. c2 is on the sub/super diagonal.
c0 = (1. + self.dep_params**2) / (1. - self.dep_params**2)
c1 = 1. / (1. - self.dep_params**2)
c2 = -self.dep_params / (1. - self.dep_params**2)
soln = []
for x in rhs:
flatten = False
if x.ndim == 1:
x = x[:, None]
flatten = True
x1 = x / stdev[:, None]
z0 = np.zeros((1, x.shape[1]))
rhs1 = np.concatenate((x[1:,:], z0), axis=0)
rhs2 = np.concatenate((z0, x[0:-1,:]), axis=0)
y = c0*x + c2*rhs1 + c2*rhs2
y[0, :] = c1*x[0, :] + c2*x[1, :]
y[-1, :] = c1*x[-1, :] + c2*x[-2, :]
y /= stdev[:, None]
if flatten:
y = np.squeeze(y)
soln.append(y)
return soln
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return ("Autoregressive(1) dependence parameter: %.3f\n" %
self.dep_params)
class CategoricalCovStruct(CovStruct):
"""
Parent class for covariance structure for categorical data models.
Attributes
----------
nlevel : int
The number of distinct levels for the outcome variable.
ibd : list
A list whose i^th element ibd[i] is an array whose rows
contain integer pairs (a,b), where endog_li[i][a:b] is the
subvector of binary indicators derived from the same ordinal
value.
"""
def initialize(self, model):
super(CategoricalCovStruct, self).initialize(model)
self.nlevel = len(model.endog_values)
self._ncut = self.nlevel - 1
from numpy.lib.stride_tricks import as_strided
b = np.dtype(np.int64).itemsize
ibd = []
for v in model.endog_li:
jj = np.arange(0, len(v) + 1, self._ncut, dtype=np.int64)
jj = as_strided(jj, shape=(len(jj) - 1, 2), strides=(b, b))
ibd.append(jj)
self.ibd = ibd
class GlobalOddsRatio(CategoricalCovStruct):
"""
Estimate the global odds ratio for a GEE with ordinal or nominal
data.
References
----------
PJ Heagerty and S Zeger. "Marginal Regression Models for Clustered
Ordinal Measurements". Journal of the American Statistical
Association Vol. 91, Issue 435 (1996).
Thomas Lumley. Generalized Estimating Equations for Ordinal Data:
A Note on Working Correlation Structures. Biometrics Vol. 52,
No. 1 (Mar., 1996), pp. 354-361
http://www.jstor.org/stable/2533173
Notes
-----
The following data structures are calculated in the class:
'ibd' is a list whose i^th element ibd[i] is a sequence of integer
pairs (a,b), where endog_li[i][a:b] is the subvector of binary
indicators derived from the same ordinal value.
`cpp` is a dictionary where cpp[group] is a map from cut-point
pairs (c,c') to the indices of all between-subject pairs derived
from the given cut points.
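Examples
--------
A hedged usage sketch with an ordinal outcome; the data are simulated
only to make the snippet self-contained, and `sm.OrdinalGEE` is the
usual `statsmodels.api` entry point (which uses this structure by
default):

>>> import numpy as np
>>> import statsmodels.api as sm
>>> rng = np.random.RandomState(0)
>>> groups = np.repeat(np.arange(50), 4)
>>> exog = rng.normal(size=(200, 2))
>>> endog = np.digitize(exog.sum(1) + rng.normal(size=200), [-1., 1.])
>>> cov = sm.cov_struct.GlobalOddsRatio("ordinal")
>>> model = sm.OrdinalGEE(endog, exog, groups, cov_struct=cov)
>>> result = model.fit()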
"""
def __init__(self, endog_type):
super(GlobalOddsRatio, self).__init__()
self.endog_type = endog_type
self.dep_params = 0.
def initialize(self, model):
super(GlobalOddsRatio, self).initialize(model)
if self.model.weights is not None:
warnings.warn("weights not implemented for GlobalOddsRatio cov_struct, using unweighted covariance estimate")
# Need to restrict to between-subject pairs
cpp = []
for v in model.endog_li:
# Number of subjects in this group
m = int(len(v) / self._ncut)
i1, i2 = np.tril_indices(m, -1)
cpp1 = {}
for k1 in range(self._ncut):
for k2 in range(k1+1):
jj = np.zeros((len(i1), 2), dtype=np.int64)
jj[:, 0] = i1*self._ncut + k1
jj[:, 1] = i2*self._ncut + k2
cpp1[(k2, k1)] = jj
cpp.append(cpp1)
self.cpp = cpp
# Initialize the dependence parameters
self.crude_or = self.observed_crude_oddsratio()
if self.model.update_dep:
self.dep_params = self.crude_or
def pooled_odds_ratio(self, tables):
"""
Returns the pooled odds ratio for a list of 2x2 tables.
The pooled odds ratio is the inverse variance weighted average
of the sample odds ratios of the tables.
"""
if len(tables) == 0:
return 1.
# Get the sampled odds ratios and variances
log_oddsratio, var = [], []
for table in tables:
lor = np.log(table[1, 1]) + np.log(table[0, 0]) -\
np.log(table[0, 1]) - np.log(table[1, 0])
log_oddsratio.append(lor)
var.append((1 / table.astype(np.float64)).sum())
# Calculate the inverse variance weighted average
wts = [1 / v for v in var]
wtsum = sum(wts)
wts = [w / wtsum for w in wts]
log_pooled_or = sum([w*e for w, e in zip(wts, log_oddsratio)])
return np.exp(log_pooled_or)
def covariance_matrix(self, expected_value, index):
vmat = self.get_eyy(expected_value, index)
vmat -= np.outer(expected_value, expected_value)
return vmat, False
def observed_crude_oddsratio(self):
"""
To obtain the crude (global) odds ratio, first pool all binary
indicators corresponding to a given pair of cut points (c,c'),
then calculate the odds ratio for this 2x2 table. The crude
odds ratio is the inverse variance weighted average of these
odds ratios. Since the covariate effects are ignored, this OR
will generally be greater than the stratified OR.
"""
cpp = self.cpp
endog = self.model.endog_li
# Storage for the contingency tables for each (c,c')
tables = {}
for ii in iterkeys(cpp[0]):
tables[ii] = np.zeros((2, 2), dtype=np.float64)
# Get the observed crude OR
for i in range(len(endog)):
# The observed joint values for the current cluster
yvec = endog[i]
endog_11 = np.outer(yvec, yvec)
endog_10 = np.outer(yvec, 1. - yvec)
endog_01 = np.outer(1. - yvec, yvec)
endog_00 = np.outer(1. - yvec, 1. - yvec)
cpp1 = cpp[i]
for ky in iterkeys(cpp1):
ix = cpp1[ky]
tables[ky][1, 1] += endog_11[ix[:, 0], ix[:, 1]].sum()
tables[ky][1, 0] += endog_10[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 1] += endog_01[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 0] += endog_00[ix[:, 0], ix[:, 1]].sum()
return self.pooled_odds_ratio(list(itervalues(tables)))
def get_eyy(self, endog_expval, index):
"""
Returns a matrix V such that V[i,j] is the joint probability
that endog[i] = 1 and endog[j] = 1, based on the marginal
probabilities of endog and the global odds ratio `current_or`.
"""
current_or = self.dep_params
ibd = self.ibd[index]
# The between-observation joint probabilities
if current_or == 1.0:
vmat = np.outer(endog_expval, endog_expval)
else:
psum = endog_expval[:, None] + endog_expval[None, :]
pprod = endog_expval[:, None] * endog_expval[None, :]
pfac = np.sqrt((1. + psum * (current_or - 1.))**2 +
4 * current_or * (1. - current_or) * pprod)
vmat = 1. + psum * (current_or - 1.) - pfac
vmat /= 2. * (current_or - 1)
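            # `vmat` is the root of the quadratic
            #   (OR - 1)*p11**2 - (1 + psum*(OR - 1))*p11 + OR*pprod = 0
            # giving the joint probability p11 = P(Y_i = 1, Y_j = 1) implied
            # by the marginal means and the working odds ratio.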
# Fix E[YY'] for elements that belong to same observation
for bdl in ibd:
evy = endog_expval[bdl[0]:bdl[1]]
if self.endog_type == "ordinal":
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
np.minimum.outer(evy, evy)
else:
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] = np.diag(evy)
return vmat
def update(self, params):
"""
Update the global odds ratio based on the current value of
params.
"""
endog = self.model.endog_li
cpp = self.cpp
cached_means = self.model.cached_means
# This will happen if all the clusters have only
# one observation
if len(cpp[0]) == 0:
return
tables = {}
for ii in cpp[0]:
tables[ii] = np.zeros((2, 2), dtype=np.float64)
for i in range(self.model.num_group):
endog_expval, _ = cached_means[i]
emat_11 = self.get_eyy(endog_expval, i)
emat_10 = endog_expval[:, None] - emat_11
emat_01 = -emat_11 + endog_expval
emat_00 = 1. - (emat_11 + emat_10 + emat_01)
cpp1 = cpp[i]
for ky in iterkeys(cpp1):
ix = cpp1[ky]
tables[ky][1, 1] += emat_11[ix[:, 0], ix[:, 1]].sum()
tables[ky][1, 0] += emat_10[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 1] += emat_01[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 0] += emat_00[ix[:, 0], ix[:, 1]].sum()
cor_expval = self.pooled_odds_ratio(list(itervalues(tables)))
self.dep_params *= self.crude_or / cor_expval
if not np.isfinite(self.dep_params):
self.dep_params = 1.
warnings.warn("dep_params became inf, resetting to 1",
ConvergenceWarning)
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
def summary(self):
return "Global odds ratio: %.3f\n" % self.dep_params
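# Illustrative sketch (not part of the statsmodels API): the inverse variance
# weighted pooling used by GlobalOddsRatio.pooled_odds_ratio, written out for
# two hand-made 2x2 tables.  The table counts are arbitrary example values.
def _demo_pooled_odds_ratio():
    import numpy as np
    tables = [np.array([[10., 5.], [4., 12.]]),
              np.array([[8., 7.], [6., 9.]])]
    log_or = [np.log(t[1, 1]) + np.log(t[0, 0]) -
              np.log(t[0, 1]) - np.log(t[1, 0]) for t in tables]
    var = [(1. / t).sum() for t in tables]
    wts = np.array([1. / v for v in var])
    wts /= wts.sum()
    # Exponentiate the weighted average of the log odds ratios.
    return np.exp(np.dot(wts, log_or))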
class OrdinalIndependence(CategoricalCovStruct):
"""
An independence covariance structure for ordinal models.
The working covariance between indicators derived from different
observations is zero. The working covariance between indicators
    derived from a common observation is determined from their current
mean values.
There are no parameters to estimate in this covariance structure.
"""
def covariance_matrix(self, expected_value, index):
ibd = self.ibd[index]
n = len(expected_value)
vmat = np.zeros((n, n))
for bdl in ibd:
ev = expected_value[bdl[0]:bdl[1]]
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
np.minimum.outer(ev, ev) - np.outer(ev, ev)
return vmat, False
# Nothing to update
def update(self, params):
pass
class NominalIndependence(CategoricalCovStruct):
"""
An independence covariance structure for nominal models.
The working covariance between indicators derived from different
observations is zero. The working covariance between indicators
    derived from a common observation is determined from their current
mean values.
There are no parameters to estimate in this covariance structure.
"""
def covariance_matrix(self, expected_value, index):
ibd = self.ibd[index]
n = len(expected_value)
vmat = np.zeros((n, n))
for bdl in ibd:
ev = expected_value[bdl[0]:bdl[1]]
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] =\
np.diag(ev) - np.outer(ev, ev)
return vmat, False
# Nothing to update
def update(self, params):
pass
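# Illustrative sketch (not part of the statsmodels API): the within-observation
# working covariance blocks produced by OrdinalIndependence and
# NominalIndependence for a single observation.  The expected values below are
# arbitrary example numbers.
def _demo_independence_blocks():
    import numpy as np
    ev = np.array([0.7, 0.4, 0.1])      # cumulative indicator means (ordinal)
    ordinal_block = np.minimum.outer(ev, ev) - np.outer(ev, ev)
    pv = np.array([0.5, 0.3, 0.2])      # category probabilities (nominal)
    nominal_block = np.diag(pv) - np.outer(pv, pv)
    return ordinal_block, nominal_block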
class Equivalence(CovStruct):
"""
A covariance structure defined in terms of equivalence classes.
An 'equivalence class' is a set of pairs of observations such that
the covariance of every pair within the equivalence class has a
common value.
Parameters
----------
pairs : dict-like
A dictionary of dictionaries, where `pairs[group][label]`
provides the indices of all pairs of observations in the group
that have the same covariance value. Specifically,
`pairs[group][label]` is a tuple `(j1, j2)`, where `j1` and `j2`
are integer arrays of the same length. `j1[i], j2[i]` is one
index pair that belongs to the `label` equivalence class. Only
one triangle of each covariance matrix should be included.
Positions where j1 and j2 have the same value are variance
parameters.
labels : array-like
An array of labels such that every distinct pair of labels
defines an equivalence class. Either `labels` or `pairs` must
be provided. When the two labels in a pair are equal two
equivalence classes are defined: one for the diagonal elements
(corresponding to variances) and one for the off-diagonal
elements (corresponding to covariances).
return_cov : boolean
If True, `covariance_matrix` returns an estimate of the
covariance matrix, otherwise returns an estimate of the
correlation matrix.
Notes
-----
Using `labels` to define the class is much easier than using
`pairs`, but is less general.
Any pair of values not contained in `pairs` will be assigned zero
covariance.
The index values in `pairs` are row indices into the `exog`
matrix. They are not updated if missing data are present. When
using this covariance structure, missing data should be removed
before constructing the model.
If using `labels`, after a model is defined using the covariance
structure it is possible to remove a label pair from the second
level of the `pairs` dictionary to force the corresponding
covariance to be zero.
Examples
--------
The following sets up the `pairs` dictionary for a model with two
groups, equal variance for all observations, and constant
covariance for all pairs of observations within each group.
>> pairs = {0: {}, 1: {}}
>> pairs[0][0] = (np.r_[0, 1, 2], np.r_[0, 1, 2])
>> pairs[0][1] = np.tril_indices(3, -1)
>> pairs[1][0] = (np.r_[3, 4, 5], np.r_[3, 4, 5])
>> pairs[1][2] = 3 + np.tril_indices(3, -1)
"""
def __init__(self, pairs=None, labels=None, return_cov=False):
super(Equivalence, self).__init__()
if (pairs is None) and (labels is None):
raise ValueError("Equivalence cov_struct requires either `pairs` or `labels`")
if (pairs is not None) and (labels is not None):
raise ValueError("Equivalence cov_struct accepts only one of `pairs` and `labels`")
if pairs is not None:
import copy
self.pairs = copy.deepcopy(pairs)
if labels is not None:
self.labels = np.asarray(labels)
self.return_cov = return_cov
def _make_pairs(self, i, j):
"""
        Create arrays containing all unique pairs of elements from `i` and
        `j`, with each pair sorted so that the smaller index comes first.
        The arrays `i` and `j` must be one-dimensional and contain
        non-negative integers.
"""
mat = np.zeros((len(i)*len(j), 2), dtype=np.int32)
# Create the pairs and order them
f = np.ones(len(j))
mat[:, 0] = np.kron(f, i).astype(np.int32)
f = np.ones(len(i))
mat[:, 1] = np.kron(j, f).astype(np.int32)
mat.sort(1)
# Remove repeated rows
try:
dtype = np.dtype((np.void, mat.dtype.itemsize * mat.shape[1]))
bmat = np.ascontiguousarray(mat).view(dtype)
_, idx = np.unique(bmat, return_index=True)
except TypeError:
# workaround for old numpy that can't call unique with complex
# dtypes
np.random.seed(4234)
bmat = np.dot(mat, np.random.uniform(size=mat.shape[1]))
_, idx = np.unique(bmat, return_index=True)
mat = mat[idx, :]
return mat[:, 0], mat[:, 1]
def _pairs_from_labels(self):
from collections import defaultdict
pairs = defaultdict(lambda : defaultdict(lambda : None))
model = self.model
df = pd.DataFrame({"labels": self.labels, "groups": model.groups})
gb = df.groupby(["groups", "labels"])
ulabels = np.unique(self.labels)
for g_ix, g_lb in enumerate(model.group_labels):
# Loop over label pairs
for lx1 in range(len(ulabels)):
for lx2 in range(lx1+1):
lb1 = ulabels[lx1]
lb2 = ulabels[lx2]
try:
i1 = gb.groups[(g_lb, lb1)]
i2 = gb.groups[(g_lb, lb2)]
except KeyError:
continue
i1, i2 = self._make_pairs(i1, i2)
clabel = str(lb1) + "/" + str(lb2)
# Variance parameters belong in their own equiv class.
jj = np.flatnonzero(i1 == i2)
if len(jj) > 0:
clabelv = clabel + "/v"
pairs[g_lb][clabelv] = (i1[jj], i2[jj])
# Covariance parameters
jj = np.flatnonzero(i1 != i2)
if len(jj) > 0:
i1 = i1[jj]
i2 = i2[jj]
pairs[g_lb][clabel] = (i1, i2)
self.pairs = pairs
def initialize(self, model):
super(Equivalence, self).initialize(model)
if self.model.weights is not None:
            warnings.warn("weights not implemented for equivalence "
                          "cov_struct, using unweighted covariance estimate")
if not hasattr(self, 'pairs'):
self._pairs_from_labels()
# Initialize so that any equivalence class containing a
# variance parameter has value 1.
self.dep_params = defaultdict(lambda : 0.)
self._var_classes = set([])
for gp in self.model.group_labels:
for lb in self.pairs[gp]:
j1, j2 = self.pairs[gp][lb]
if np.any(j1 == j2):
if not np.all(j1 == j2):
warnings.warn("equivalence class contains both variance and covariance parameters")
self._var_classes.add(lb)
self.dep_params[lb] = 1
# Need to start indexing at 0 within each group.
        # rx maps old indices to new indices
rx = -1 * np.ones(len(self.model.endog), dtype=np.int32)
for g_ix, g_lb in enumerate(self.model.group_labels):
ii = self.model.group_indices[g_lb]
rx[ii] = np.arange(len(ii), dtype=np.int32)
# Reindex
for gp in self.model.group_labels:
for lb in self.pairs[gp].keys():
a, b = self.pairs[gp][lb]
self.pairs[gp][lb] = (rx[a], rx[b])
def update(self, params):
endog = self.model.endog_li
varfunc = self.model.family.variance
cached_means = self.model.cached_means
dep_params = defaultdict(lambda : [0., 0., 0.])
n_pairs = defaultdict(lambda : 0)
dim = len(params)
for k, gp in enumerate(self.model.group_labels):
expval, _ = cached_means[k]
stdev = np.sqrt(varfunc(expval))
resid = (endog[k] - expval) / stdev
for lb in self.pairs[gp].keys():
if (not self.return_cov) and lb in self._var_classes:
continue
jj = self.pairs[gp][lb]
dep_params[lb][0] += np.sum(resid[jj[0]] * resid[jj[1]])
if not self.return_cov:
dep_params[lb][1] += np.sum(resid[jj[0]]**2)
dep_params[lb][2] += np.sum(resid[jj[1]]**2)
n_pairs[lb] += len(jj[0])
if self.return_cov:
for lb in dep_params.keys():
dep_params[lb] = dep_params[lb][0] / (n_pairs[lb] - dim)
else:
for lb in dep_params.keys():
den = np.sqrt(dep_params[lb][1] * dep_params[lb][2])
dep_params[lb] = dep_params[lb][0] / den
for lb in self._var_classes:
dep_params[lb] = 1.
self.dep_params = dep_params
self.n_pairs = n_pairs
def covariance_matrix(self, expval, index):
dim = len(expval)
cmat = np.zeros((dim, dim))
g_lb = self.model.group_labels[index]
for lb in self.pairs[g_lb].keys():
j1, j2 = self.pairs[g_lb][lb]
cmat[j1, j2] = self.dep_params[lb]
cmat = cmat + cmat.T
np.fill_diagonal(cmat, cmat.diagonal() / 2)
return cmat, not self.return_cov
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
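# Illustrative sketch (not part of the statsmodels API): what
# Equivalence._make_pairs returns for two small index sets -- every unique
# pair, sorted within each pair so the smaller index comes first.  The example
# indices are arbitrary.
def _demo_make_pairs():
    import numpy as np
    i, j = np.r_[0, 1, 2], np.r_[1, 2]
    mat = np.array([sorted((a, b)) for a in i for b in j])
    # np.unique(..., axis=0) requires a reasonably recent numpy.
    mat = np.unique(mat, axis=0)    # rows: (0,1), (0,2), (1,1), (1,2), (2,2)
    return mat[:, 0], mat[:, 1]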
| bsd-3-clause |
mlyundin/scikit-learn | sklearn/covariance/tests/test_covariance.py | 69 | 11116 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
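# Illustrative sketch (assumption about the library internals, not part of the
# test suite): ShrunkCovariance's estimate is the convex combination
# (1 - a) * S + a * (trace(S) / p) * I.  Here it is written out by hand and
# compared against `shrunk_covariance` on the dataset above.
def _demo_shrunk_formula(alpha=0.5):
    emp_cov = empirical_covariance(X)
    p = emp_cov.shape[0]
    manual = ((1. - alpha) * emp_cov +
              alpha * (np.trace(emp_cov) / p) * np.eye(p))
    assert_array_almost_equal(manual,
                              shrunk_covariance(emp_cov, shrinkage=alpha))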
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
kaiserroll14/301finalproject | main/pandas/core/dtypes.py | 9 | 5492 | """ define extension dtypes """
import re
import numpy as np
from pandas import compat
class ExtensionDtype(object):
"""
A np.dtype duck-typed class, suitable for holding a custom dtype.
THIS IS NOT A REAL NUMPY DTYPE
"""
name = None
names = None
type = None
subdtype = None
kind = None
str = None
num = 100
shape = tuple()
itemsize = 8
base = None
isbuiltin = 0
isnative = 0
_metadata = []
def __unicode__(self):
return self.name
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
def __hash__(self):
raise NotImplementedError("sub-classes should implement an __hash__ method")
def __eq__(self, other):
raise NotImplementedError("sub-classes should implement an __eq__ method")
@classmethod
    def is_dtype(cls, dtype):
        """
        Return a boolean indicating whether the passed type is an actual
        dtype that we can match (via string or type).
        """
if hasattr(dtype, 'dtype'):
dtype = dtype.dtype
if isinstance(dtype, cls):
return True
elif isinstance(dtype, np.dtype):
return False
try:
return cls.construct_from_string(dtype) is not None
except:
return False
class CategoricalDtypeType(type):
"""
the type of CategoricalDtype, this metaclass determines subclass ability
"""
pass
class CategoricalDtype(ExtensionDtype):
"""
A np.dtype duck-typed class, suitable for holding a custom categorical dtype.
THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.object
"""
name = 'category'
type = CategoricalDtypeType
kind = 'O'
str = '|O08'
base = np.dtype('O')
def __hash__(self):
# make myself hashable
return hash(str(self))
def __eq__(self, other):
if isinstance(other, compat.string_types):
return other == self.name
return isinstance(other, CategoricalDtype)
@classmethod
    def construct_from_string(cls, string):
        """
        Attempt to construct this type from a string; raise a TypeError
        if it's not possible.
        """
try:
if string == 'category':
return cls()
except:
pass
raise TypeError("cannot construct a CategoricalDtype")
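# Illustrative usage sketch (not part of the pandas source): the duck-typed
# dtype compares equal to its string alias and can be rebuilt from it.
def _demo_categorical_dtype():
    dtype = CategoricalDtype.construct_from_string('category')
    assert dtype == 'category'
    assert CategoricalDtype.is_dtype('category')
    return dtype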
class DatetimeTZDtypeType(type):
"""
the type of DatetimeTZDtype, this metaclass determines subclass ability
"""
pass
class DatetimeTZDtype(ExtensionDtype):
"""
A np.dtype duck-typed class, suitable for holding a custom datetime with tz dtype.
THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.datetime64[ns]
"""
type = DatetimeTZDtypeType
kind = 'M'
str = '|M8[ns]'
num = 101
base = np.dtype('M8[ns]')
_metadata = ['unit','tz']
    _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
def __init__(self, unit, tz=None):
"""
Parameters
----------
unit : string unit that this represents, currently must be 'ns'
tz : string tz that this represents
"""
if isinstance(unit, DatetimeTZDtype):
self.unit, self.tz = unit.unit, unit.tz
return
if tz is None:
# we were passed a string that we can construct
try:
m = self._match.search(unit)
if m is not None:
self.unit = m.groupdict()['unit']
self.tz = m.groupdict()['tz']
return
except:
raise ValueError("could not construct DatetimeTZDtype")
raise ValueError("DatetimeTZDtype constructor must have a tz supplied")
if unit != 'ns':
raise ValueError("DatetimeTZDtype only supports ns units")
self.unit = unit
self.tz = tz
@classmethod
    def construct_from_string(cls, string):
        """
        Attempt to construct this type from a string; raise a TypeError
        if it's not possible.
        """
try:
return cls(unit=string)
except ValueError:
raise TypeError("could not construct DatetimeTZDtype")
def __unicode__(self):
# format the tz
return "datetime64[{unit}, {tz}]".format(unit=self.unit, tz=self.tz)
@property
def name(self):
return str(self)
def __hash__(self):
# make myself hashable
return hash(str(self))
def __eq__(self, other):
if isinstance(other, compat.string_types):
return other == self.name
return isinstance(other, DatetimeTZDtype) and self.unit == other.unit and self.tz == other.tz
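# Illustrative usage sketch (not part of the pandas source): a DatetimeTZDtype
# can be built from (unit, tz) or parsed back from its own string form, and it
# compares equal to that string.
def _demo_datetimetz_dtype():
    dt1 = DatetimeTZDtype('ns', tz='US/Eastern')
    dt2 = DatetimeTZDtype.construct_from_string(str(dt1))
    assert dt1 == dt2
    assert dt2 == 'datetime64[ns, US/Eastern]'
    return dt2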
| gpl-3.0 |
joshloyal/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 143 | 9461 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.model_selection import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
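# Illustrative sketch (assumption, not part of the test suite): the submatrix
# that MockBiclustering's indices select from a 5x4 array, computed by hand
# with `np.ix_` for comparison with `get_submatrix` in the test below.
def _demo_expected_submatrix():
    data = np.arange(20).reshape(5, 4)
    rows = np.where([True, True, False, False, True])[0]
    cols = np.where([False, False, True, True])[0]
    return data[np.ix_(rows, cols)]    # [[2, 3], [6, 7], [18, 19]]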
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/core/dtypes/common.py | 4 | 49844 | """ common type operations """
import numpy as np
from pandas.compat import (string_types, text_type, binary_type,
PY3, PY36)
from pandas._libs import algos, lib
from .dtypes import (CategoricalDtype, CategoricalDtypeType,
DatetimeTZDtype, DatetimeTZDtypeType,
PeriodDtype, PeriodDtypeType,
IntervalDtype, IntervalDtypeType,
ExtensionDtype)
from .generic import (ABCCategorical, ABCPeriodIndex,
ABCDatetimeIndex, ABCSeries,
ABCSparseArray, ABCSparseSeries)
from .inference import is_string_like
from .inference import * # noqa
_POSSIBLY_CAST_DTYPES = set([np.dtype(t).name
for t in ['O', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64']])
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')
_INT64_DTYPE = np.dtype(np.int64)
# oh the troubles to reduce import time
_is_scipy_sparse = None
_ensure_float64 = algos.ensure_float64
_ensure_float32 = algos.ensure_float32
def _ensure_float(arr):
"""
Ensure that an array object has a float dtype if possible.
Parameters
----------
arr : array-like
The array whose data type we want to enforce as float.
Returns
-------
float_arr : The original array cast to the float dtype if
possible. Otherwise, the original array is returned.
"""
if issubclass(arr.dtype.type, (np.integer, np.bool_)):
arr = arr.astype(float)
return arr
_ensure_uint64 = algos.ensure_uint64
_ensure_int64 = algos.ensure_int64
_ensure_int32 = algos.ensure_int32
_ensure_int16 = algos.ensure_int16
_ensure_int8 = algos.ensure_int8
_ensure_platform_int = algos.ensure_platform_int
_ensure_object = algos.ensure_object
def _ensure_categorical(arr):
"""
Ensure that an array-like object is a Categorical (if not already).
Parameters
----------
arr : array-like
The array that we want to convert into a Categorical.
Returns
-------
cat_arr : The original array cast as a Categorical. If it already
is a Categorical, we return as is.
"""
if not is_categorical(arr):
from pandas import Categorical
arr = Categorical(arr)
return arr
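# Illustrative usage sketch (assumption, not part of the pandas source): the
# two coercion helpers above applied to small inputs.
def _demo_ensure_helpers():
    import numpy as np
    floats = _ensure_float(np.array([1, 2, 3]))            # int64 -> float64
    cats = _ensure_categorical(np.array(['a', 'b', 'a']))  # -> Categorical
    return floats.dtype, cats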
def is_object_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of the object dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is of the object dtype.
Examples
--------
>>> is_object_dtype(object)
True
>>> is_object_dtype(int)
False
>>> is_object_dtype(np.array([], dtype=object))
True
>>> is_object_dtype(np.array([], dtype=int))
False
>>> is_object_dtype([1, 2, 3])
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.object_)
def is_sparse(arr):
"""
Check whether an array-like is a pandas sparse array.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is a pandas sparse array.
Examples
--------
>>> is_sparse(np.array([1, 2, 3]))
False
>>> is_sparse(pd.SparseArray([1, 2, 3]))
True
>>> is_sparse(pd.SparseSeries([1, 2, 3]))
True
This function checks only for pandas sparse array instances, so
sparse arrays from other libraries will return False.
>>> from scipy.sparse import bsr_matrix
>>> is_sparse(bsr_matrix([1, 2, 3]))
False
"""
return isinstance(arr, (ABCSparseArray, ABCSparseSeries))
def is_scipy_sparse(arr):
"""
Check whether an array-like is a scipy.sparse.spmatrix instance.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is a
scipy.sparse.spmatrix instance.
Notes
-----
If scipy is not installed, this function will always return False.
Examples
--------
>>> from scipy.sparse import bsr_matrix
>>> is_scipy_sparse(bsr_matrix([1, 2, 3]))
True
>>> is_scipy_sparse(pd.SparseArray([1, 2, 3]))
False
>>> is_scipy_sparse(pd.SparseSeries([1, 2, 3]))
False
"""
global _is_scipy_sparse
if _is_scipy_sparse is None:
try:
from scipy.sparse import issparse as _is_scipy_sparse
except ImportError:
_is_scipy_sparse = lambda _: False
return _is_scipy_sparse(arr)
def is_categorical(arr):
"""
Check whether an array-like is a Categorical instance.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is of a Categorical instance.
Examples
--------
>>> is_categorical([1, 2, 3])
False
Categoricals, Series Categoricals, and CategoricalIndex will return True.
>>> cat = pd.Categorical([1, 2, 3])
>>> is_categorical(cat)
True
>>> is_categorical(pd.Series(cat))
True
>>> is_categorical(pd.CategoricalIndex([1, 2, 3]))
True
"""
return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr)
def is_datetimetz(arr):
"""
Check whether an array-like is a datetime array-like with a timezone
component in its dtype.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is a datetime array-like with
a timezone component in its dtype.
Examples
--------
>>> is_datetimetz([1, 2, 3])
False
Although the following examples are both DatetimeIndex objects,
the first one returns False because it has no timezone component
unlike the second one, which returns True.
>>> is_datetimetz(pd.DatetimeIndex([1, 2, 3]))
False
>>> is_datetimetz(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
The object need not be a DatetimeIndex object. It just needs to have
a dtype which has a timezone component.
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_datetimetz(s)
True
"""
# TODO: do we need this function?
# It seems like a repeat of is_datetime64tz_dtype.
return ((isinstance(arr, ABCDatetimeIndex) and
getattr(arr, 'tz', None) is not None) or
is_datetime64tz_dtype(arr))
def is_period(arr):
"""
Check whether an array-like is a periodical index.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is a periodical index.
Examples
--------
>>> is_period([1, 2, 3])
False
>>> is_period(pd.Index([1, 2, 3]))
False
>>> is_period(pd.PeriodIndex(["2017-01-01"], freq="D"))
True
"""
# TODO: do we need this function?
# It seems like a repeat of is_period_arraylike.
return isinstance(arr, ABCPeriodIndex) or is_period_arraylike(arr)
def is_datetime64_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of the datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is of
the datetime64 dtype.
Examples
--------
>>> is_datetime64_dtype(object)
False
>>> is_datetime64_dtype(np.datetime64)
True
>>> is_datetime64_dtype(np.array([], dtype=int))
False
>>> is_datetime64_dtype(np.array([], dtype=np.datetime64))
True
>>> is_datetime64_dtype([1, 2, 3])
False
"""
if arr_or_dtype is None:
return False
try:
tipo = _get_dtype_type(arr_or_dtype)
except TypeError:
return False
return issubclass(tipo, np.datetime64)
def is_datetime64tz_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of a DatetimeTZDtype dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is of
a DatetimeTZDtype dtype.
Examples
--------
>>> is_datetime64tz_dtype(object)
False
>>> is_datetime64tz_dtype([1, 2, 3])
False
>>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive
False
>>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_datetime64tz_dtype(dtype)
True
>>> is_datetime64tz_dtype(s)
True
"""
if arr_or_dtype is None:
return False
return DatetimeTZDtype.is_dtype(arr_or_dtype)
def is_timedelta64_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of the timedelta64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is
of the timedelta64 dtype.
Examples
--------
>>> is_timedelta64_dtype(object)
False
>>> is_timedelta64_dtype(np.timedelta64)
True
>>> is_timedelta64_dtype([1, 2, 3])
False
>>> is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]"))
True
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.timedelta64)
def is_period_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of the Period dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is of the Period dtype.
Examples
--------
>>> is_period_dtype(object)
False
>>> is_period_dtype(PeriodDtype(freq="D"))
True
>>> is_period_dtype([1, 2, 3])
False
>>> is_period_dtype(pd.Period("2017-01-01"))
False
>>> is_period_dtype(pd.PeriodIndex([], freq="A"))
True
"""
# TODO: Consider making Period an instance of PeriodDtype
if arr_or_dtype is None:
return False
return PeriodDtype.is_dtype(arr_or_dtype)
def is_interval_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of the Interval dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is
of the Interval dtype.
Examples
--------
>>> is_interval_dtype(object)
False
>>> is_interval_dtype(IntervalDtype())
True
>>> is_interval_dtype([1, 2, 3])
False
>>>
>>> interval = pd.Interval(1, 2, closed="right")
>>> is_interval_dtype(interval)
False
>>> is_interval_dtype(pd.IntervalIndex([interval]))
True
"""
# TODO: Consider making Interval an instance of IntervalDtype
if arr_or_dtype is None:
return False
return IntervalDtype.is_dtype(arr_or_dtype)
def is_categorical_dtype(arr_or_dtype):
"""
Check whether an array-like or dtype is of the Categorical dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype to check.
Returns
-------
boolean : Whether or not the array-like or dtype is
of the Categorical dtype.
Examples
--------
>>> is_categorical_dtype(object)
False
>>> is_categorical_dtype(CategoricalDtype())
True
>>> is_categorical_dtype([1, 2, 3])
False
>>> is_categorical_dtype(pd.Categorical([1, 2, 3]))
True
>>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))
True
"""
if arr_or_dtype is None:
return False
return CategoricalDtype.is_dtype(arr_or_dtype)
def is_string_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of the string dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of the string dtype.
Examples
--------
>>> is_string_dtype(str)
True
>>> is_string_dtype(object)
True
>>> is_string_dtype(int)
False
>>>
>>> is_string_dtype(np.array(['a', 'b']))
True
>>> is_string_dtype(pd.Series([1, 2]))
False
"""
# TODO: gh-15585: consider making the checks stricter.
if arr_or_dtype is None:
return False
try:
dtype = _get_dtype(arr_or_dtype)
return dtype.kind in ('O', 'S', 'U') and not is_period_dtype(dtype)
except TypeError:
return False
def is_period_arraylike(arr):
"""
Check whether an array-like is a periodical array-like or PeriodIndex.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is a periodical
array-like or PeriodIndex instance.
Examples
--------
>>> is_period_arraylike([1, 2, 3])
False
>>> is_period_arraylike(pd.Index([1, 2, 3]))
False
>>> is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
True
"""
if isinstance(arr, ABCPeriodIndex):
return True
elif isinstance(arr, (np.ndarray, ABCSeries)):
return arr.dtype == object and lib.infer_dtype(arr) == 'period'
return getattr(arr, 'inferred_type', None) == 'period'
def is_datetime_arraylike(arr):
"""
Check whether an array-like is a datetime array-like or DatetimeIndex.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is a datetime
array-like or DatetimeIndex.
Examples
--------
>>> is_datetime_arraylike([1, 2, 3])
False
>>> is_datetime_arraylike(pd.Index([1, 2, 3]))
False
>>> is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3]))
True
"""
if isinstance(arr, ABCDatetimeIndex):
return True
elif isinstance(arr, (np.ndarray, ABCSeries)):
return arr.dtype == object and lib.infer_dtype(arr) == 'datetime'
return getattr(arr, 'inferred_type', None) == 'datetime'
def is_datetimelike(arr):
"""
Check whether an array-like is a datetime-like array-like.
Acceptable datetime-like objects are (but not limited to) datetime
indices, periodic indices, and timedelta indices.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is a datetime-like array-like.
Examples
--------
>>> is_datetimelike([1, 2, 3])
False
>>> is_datetimelike(pd.Index([1, 2, 3]))
False
>>> is_datetimelike(pd.DatetimeIndex([1, 2, 3]))
True
>>> is_datetimelike(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>> is_datetimelike(pd.PeriodIndex([], freq="A"))
True
>>> is_datetimelike(np.array([], dtype=np.datetime64))
True
>>> is_datetimelike(pd.Series([], dtype="timedelta64[ns]"))
True
>>>
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_datetimelike(s)
True
"""
return (is_datetime64_dtype(arr) or is_datetime64tz_dtype(arr) or
is_timedelta64_dtype(arr) or
isinstance(arr, ABCPeriodIndex) or
is_datetimetz(arr))
def is_dtype_equal(source, target):
"""
Check if two dtypes are equal.
Parameters
----------
source : The first dtype to compare
target : The second dtype to compare
Returns
----------
boolean : Whether or not the two dtypes are equal.
Examples
--------
>>> is_dtype_equal(int, float)
False
>>> is_dtype_equal("int", int)
True
>>> is_dtype_equal(object, "category")
False
>>> is_dtype_equal(CategoricalDtype(), "category")
True
>>> is_dtype_equal(DatetimeTZDtype(), "datetime64")
False
"""
try:
source = _get_dtype(source)
target = _get_dtype(target)
return source == target
except (TypeError, AttributeError):
# invalid comparison
# object == category will hit this
return False
def is_any_int_dtype(arr_or_dtype):
"""
DEPRECATED: This function will be removed in a future version.
Check whether the provided array or dtype is of an integer dtype.
In this function, timedelta64 instances are also considered "any-integer"
type objects and will return True.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of an integer dtype.
Examples
--------
>>> is_any_int_dtype(str)
False
>>> is_any_int_dtype(int)
True
>>> is_any_int_dtype(float)
False
>>> is_any_int_dtype(np.uint64)
True
>>> is_any_int_dtype(np.datetime64)
False
>>> is_any_int_dtype(np.timedelta64)
True
>>> is_any_int_dtype(np.array(['a', 'b']))
False
>>> is_any_int_dtype(pd.Series([1, 2]))
True
>>> is_any_int_dtype(np.array([], dtype=np.timedelta64))
True
>>> is_any_int_dtype(pd.Index([1, 2.])) # float
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.integer)
def is_integer_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of an integer dtype.
    Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of an integer dtype
and not an instance of timedelta64.
Examples
--------
>>> is_integer_dtype(str)
False
>>> is_integer_dtype(int)
True
>>> is_integer_dtype(float)
False
>>> is_integer_dtype(np.uint64)
True
>>> is_integer_dtype(np.datetime64)
False
>>> is_integer_dtype(np.timedelta64)
False
>>> is_integer_dtype(np.array(['a', 'b']))
False
>>> is_integer_dtype(pd.Series([1, 2]))
True
>>> is_integer_dtype(np.array([], dtype=np.timedelta64))
False
>>> is_integer_dtype(pd.Index([1, 2.])) # float
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.integer) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_signed_integer_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a signed integer dtype.
    Unlike in `is_any_int_dtype`, timedelta64 instances will return False.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of a signed integer dtype
and not an instance of timedelta64.
Examples
--------
>>> is_signed_integer_dtype(str)
False
>>> is_signed_integer_dtype(int)
True
>>> is_signed_integer_dtype(float)
False
>>> is_signed_integer_dtype(np.uint64) # unsigned
False
>>> is_signed_integer_dtype(np.datetime64)
False
>>> is_signed_integer_dtype(np.timedelta64)
False
>>> is_signed_integer_dtype(np.array(['a', 'b']))
False
>>> is_signed_integer_dtype(pd.Series([1, 2]))
True
>>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64))
False
>>> is_signed_integer_dtype(pd.Index([1, 2.])) # float
False
>>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.signedinteger) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_unsigned_integer_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of an unsigned integer dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of an
unsigned integer dtype.
Examples
--------
>>> is_unsigned_integer_dtype(str)
False
>>> is_unsigned_integer_dtype(int) # signed
False
>>> is_unsigned_integer_dtype(float)
False
>>> is_unsigned_integer_dtype(np.uint64)
True
>>> is_unsigned_integer_dtype(np.array(['a', 'b']))
False
>>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed
False
>>> is_unsigned_integer_dtype(pd.Index([1, 2.])) # float
False
>>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))
True
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.unsignedinteger) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_int64_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of the int64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of the int64 dtype.
Notes
-----
Depending on system architecture, the return value of `is_int64_dtype(
int)` will be True if the OS uses 64-bit integers and False if the OS
uses 32-bit integers.
Examples
--------
>>> is_int64_dtype(str)
False
>>> is_int64_dtype(np.int32)
False
>>> is_int64_dtype(np.int64)
True
>>> is_int64_dtype(float)
False
>>> is_int64_dtype(np.uint64) # unsigned
False
>>> is_int64_dtype(np.array(['a', 'b']))
False
>>> is_int64_dtype(np.array([1, 2], dtype=np.int64))
True
>>> is_int64_dtype(pd.Index([1, 2.])) # float
False
>>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.int64)
def is_int_or_datetime_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of an
integer, timedelta64, or datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of an
integer, timedelta64, or datetime64 dtype.
Examples
--------
>>> is_int_or_datetime_dtype(str)
False
>>> is_int_or_datetime_dtype(int)
True
>>> is_int_or_datetime_dtype(float)
False
>>> is_int_or_datetime_dtype(np.uint64)
True
>>> is_int_or_datetime_dtype(np.datetime64)
True
>>> is_int_or_datetime_dtype(np.timedelta64)
True
>>> is_int_or_datetime_dtype(np.array(['a', 'b']))
False
>>> is_int_or_datetime_dtype(pd.Series([1, 2]))
True
>>> is_int_or_datetime_dtype(np.array([], dtype=np.timedelta64))
True
>>> is_int_or_datetime_dtype(np.array([], dtype=np.datetime64))
True
>>> is_int_or_datetime_dtype(pd.Index([1, 2.])) # float
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.integer) or
issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_datetime64_any_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of the datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of the datetime64 dtype.
Examples
--------
>>> is_datetime64_any_dtype(str)
False
>>> is_datetime64_any_dtype(int)
False
>>> is_datetime64_any_dtype(np.datetime64) # can be tz-naive
True
>>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))
True
>>> is_datetime64_any_dtype(np.array(['a', 'b']))
False
>>> is_datetime64_any_dtype(np.array([1, 2]))
False
>>> is_datetime64_any_dtype(np.array([], dtype=np.datetime64))
True
    >>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3],
    ...                                          dtype=np.datetime64))
True
"""
if arr_or_dtype is None:
return False
return (is_datetime64_dtype(arr_or_dtype) or
is_datetime64tz_dtype(arr_or_dtype))
def is_datetime64_ns_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of the datetime64[ns] dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of the datetime64[ns] dtype.
Examples
--------
>>> is_datetime64_ns_dtype(str)
False
>>> is_datetime64_ns_dtype(int)
False
>>> is_datetime64_ns_dtype(np.datetime64) # no unit
False
>>> is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern"))
True
>>> is_datetime64_ns_dtype(np.array(['a', 'b']))
False
>>> is_datetime64_ns_dtype(np.array([1, 2]))
False
>>> is_datetime64_ns_dtype(np.array([], dtype=np.datetime64)) # no unit
False
>>> is_datetime64_ns_dtype(np.array([],
dtype="datetime64[ps]")) # wrong unit
False
>>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3],
dtype=np.datetime64)) # has 'ns' unit
True
"""
if arr_or_dtype is None:
return False
try:
tipo = _get_dtype(arr_or_dtype)
except TypeError:
if is_datetime64tz_dtype(arr_or_dtype):
tipo = _get_dtype(arr_or_dtype.dtype)
else:
return False
return tipo == _NS_DTYPE or getattr(tipo, 'base', None) == _NS_DTYPE
def is_timedelta64_ns_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of the timedelta64[ns] dtype.
This is a very specific dtype, so generic ones like `np.timedelta64`
will return False if passed into this function.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of the
timedelta64[ns] dtype.
Examples
--------
>>> is_timedelta64_ns_dtype(np.dtype('m8[ns]'))
True
>>> is_timedelta64_ns_dtype(np.dtype('m8[ps]')) # Wrong frequency
False
>>> is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))
True
>>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))
False
"""
if arr_or_dtype is None:
return False
try:
tipo = _get_dtype(arr_or_dtype)
return tipo == _TD_DTYPE
except TypeError:
return False
def is_datetime_or_timedelta_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of
a timedelta64 or datetime64 dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of a
timedelta64 or datetime64 dtype.
Examples
--------
>>> is_datetime_or_timedelta_dtype(str)
False
>>> is_datetime_or_timedelta_dtype(int)
False
>>> is_datetime_or_timedelta_dtype(np.datetime64)
True
>>> is_datetime_or_timedelta_dtype(np.timedelta64)
True
>>> is_datetime_or_timedelta_dtype(np.array(['a', 'b']))
False
>>> is_datetime_or_timedelta_dtype(pd.Series([1, 2]))
False
>>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.timedelta64))
True
>>> is_datetime_or_timedelta_dtype(np.array([], dtype=np.datetime64))
True
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, (np.datetime64, np.timedelta64))
def _is_unorderable_exception(e):
"""
Check if the exception raised is an unorderable exception.
The error message differs for 3 <= PY <= 3.5 and PY >= 3.6, so
we need to condition based on Python version.
Parameters
----------
e : Exception or sub-class
The exception object to check.
Returns
-------
boolean : Whether or not the exception raised is an unorderable exception.
"""
if PY36:
return "'>' not supported between instances of" in str(e)
elif PY3:
return 'unorderable' in str(e)
return False
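# Illustrative usage sketch (added for exposition, not part of the original
# module): the helper is meant to be called from an ``except TypeError``
# block around a comparison, so the caller can tell "unorderable types"
# failures apart from other TypeErrors regardless of Python version.
#
# try:
#     1 > "a"  # raises TypeError on Python 3
# except TypeError as e:
#     if _is_unorderable_exception(e):
#         pass  # fall back to an alternative comparison strategy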
def is_numeric_v_string_like(a, b):
"""
Check if we are comparing a string-like object to a numeric ndarray.
NumPy doesn't like to compare such objects, especially numeric arrays
and scalar string-likes.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean : Whether we are comparing a string-like
object to a numeric array.
Examples
--------
>>> is_numeric_v_string_like(1, 1)
False
>>> is_numeric_v_string_like("foo", "foo")
False
>>> is_numeric_v_string_like(1, "foo") # non-array numeric
False
>>> is_numeric_v_string_like(np.array([1]), "foo")
True
>>> is_numeric_v_string_like("foo", np.array([1])) # symmetric check
True
>>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))
True
>>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
True
>>> is_numeric_v_string_like(np.array([1]), np.array([2]))
False
>>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"]))
False
"""
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
is_a_numeric_array = is_a_array and is_numeric_dtype(a)
is_b_numeric_array = is_b_array and is_numeric_dtype(b)
is_a_string_array = is_a_array and is_string_like_dtype(a)
is_b_string_array = is_b_array and is_string_like_dtype(b)
is_a_scalar_string_like = not is_a_array and is_string_like(a)
is_b_scalar_string_like = not is_b_array and is_string_like(b)
return ((is_a_numeric_array and is_b_scalar_string_like) or
(is_b_numeric_array and is_a_scalar_string_like) or
(is_a_numeric_array and is_b_string_array) or
(is_b_numeric_array and is_a_string_array))
def is_datetimelike_v_numeric(a, b):
"""
Check if we are comparing a datetime-like object to a numeric object.
By "numeric," we mean an object that is either of an int or float dtype.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean : Whether we are comparing a datetime-like
object to a numeric object.
Examples
--------
>>> dt = np.datetime64(pd.datetime(2017, 1, 1))
>>>
>>> is_datetimelike_v_numeric(1, 1)
False
>>> is_datetimelike_v_numeric(dt, dt)
False
>>> is_datetimelike_v_numeric(1, dt)
True
>>> is_datetimelike_v_numeric(dt, 1) # symmetric check
True
>>> is_datetimelike_v_numeric(np.array([dt]), 1)
True
>>> is_datetimelike_v_numeric(np.array([1]), dt)
True
>>> is_datetimelike_v_numeric(np.array([dt]), np.array([1]))
True
>>> is_datetimelike_v_numeric(np.array([1]), np.array([2]))
False
>>> is_datetimelike_v_numeric(np.array([dt]), np.array([dt]))
False
"""
if not hasattr(a, 'dtype'):
a = np.asarray(a)
if not hasattr(b, 'dtype'):
b = np.asarray(b)
def is_numeric(x):
"""
Check if an object has a numeric dtype (i.e. integer or float).
"""
return is_integer_dtype(x) or is_float_dtype(x)
is_datetimelike = needs_i8_conversion
return ((is_datetimelike(a) and is_numeric(b)) or
(is_datetimelike(b) and is_numeric(a)))
def is_datetimelike_v_object(a, b):
"""
Check if we are comparing a datetime-like object to an object instance.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean : Whether we are comparing a datetime-like
object to an object instance.
Examples
--------
>>> obj = object()
>>> dt = np.datetime64(pd.datetime(2017, 1, 1))
>>>
>>> is_datetimelike_v_object(obj, obj)
False
>>> is_datetimelike_v_object(dt, dt)
False
>>> is_datetimelike_v_object(obj, dt)
True
>>> is_datetimelike_v_object(dt, obj) # symmetric check
True
>>> is_datetimelike_v_object(np.array([dt]), obj)
True
>>> is_datetimelike_v_object(np.array([obj]), dt)
True
>>> is_datetimelike_v_object(np.array([dt]), np.array([obj]))
True
>>> is_datetimelike_v_object(np.array([obj]), np.array([obj]))
False
>>> is_datetimelike_v_object(np.array([dt]), np.array([1]))
False
>>> is_datetimelike_v_object(np.array([dt]), np.array([dt]))
False
"""
if not hasattr(a, 'dtype'):
a = np.asarray(a)
if not hasattr(b, 'dtype'):
b = np.asarray(b)
is_datetimelike = needs_i8_conversion
return ((is_datetimelike(a) and is_object_dtype(b)) or
(is_datetimelike(b) and is_object_dtype(a)))
def needs_i8_conversion(arr_or_dtype):
"""
Check whether the array or dtype should be converted to int64.
An array-like or dtype "needs" such a conversion if the array-like
or dtype is of a datetime-like dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype should be converted to int64.
Examples
--------
>>> needs_i8_conversion(str)
False
>>> needs_i8_conversion(np.int64)
False
>>> needs_i8_conversion(np.datetime64)
True
>>> needs_i8_conversion(np.array(['a', 'b']))
False
>>> needs_i8_conversion(pd.Series([1, 2]))
False
>>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
True
>>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
"""
if arr_or_dtype is None:
return False
return (is_datetime_or_timedelta_dtype(arr_or_dtype) or
is_datetime64tz_dtype(arr_or_dtype) or
is_period_dtype(arr_or_dtype))
def is_numeric_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a numeric dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of a numeric dtype.
Examples
--------
>>> is_numeric_dtype(str)
False
>>> is_numeric_dtype(int)
True
>>> is_numeric_dtype(float)
True
>>> is_numeric_dtype(np.uint64)
True
>>> is_numeric_dtype(np.datetime64)
False
>>> is_numeric_dtype(np.timedelta64)
False
>>> is_numeric_dtype(np.array(['a', 'b']))
False
>>> is_numeric_dtype(pd.Series([1, 2]))
True
>>> is_numeric_dtype(pd.Index([1, 2.]))
True
>>> is_numeric_dtype(np.array([], dtype=np.timedelta64))
False
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, (np.number, np.bool_)) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_string_like_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a string-like dtype.
Unlike `is_string_dtype`, the object dtype is excluded because it
is a mixed dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of the string dtype.
Examples
--------
>>> is_string_like_dtype(str)
True
>>> is_string_like_dtype(object)
False
>>> is_string_like_dtype(np.array(['a', 'b']))
True
>>> is_string_like_dtype(pd.Series([1, 2]))
False
"""
if arr_or_dtype is None:
return False
try:
dtype = _get_dtype(arr_or_dtype)
return dtype.kind in ('S', 'U')
except TypeError:
return False
def is_float_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a float dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of a float dtype.
Examples
--------
>>> is_float_dtype(str)
False
>>> is_float_dtype(int)
False
>>> is_float_dtype(float)
True
>>> is_float_dtype(np.array(['a', 'b']))
False
>>> is_float_dtype(pd.Series([1, 2]))
False
>>> is_float_dtype(pd.Index([1, 2.]))
True
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.floating)
def is_floating_dtype(arr_or_dtype):
"""
DEPRECATED: This function will be removed in a future version.
Check whether the provided array or dtype is an instance of
numpy's float dtype.
Unlike `is_float_dtype`, this check is a lot stricter, as it requires
`isinstance` of `np.floating` and not `issubclass`.
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return isinstance(tipo, np.floating)
def is_bool_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a boolean dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of a boolean dtype.
Examples
--------
>>> is_bool_dtype(str)
False
>>> is_bool_dtype(int)
False
>>> is_bool_dtype(bool)
True
>>> is_bool_dtype(np.bool)
True
>>> is_bool_dtype(np.array(['a', 'b']))
False
>>> is_bool_dtype(pd.Series([1, 2]))
False
>>> is_bool_dtype(np.array([True, False]))
True
"""
if arr_or_dtype is None:
return False
try:
tipo = _get_dtype_type(arr_or_dtype)
except ValueError:
# this isn't even a dtype
return False
return issubclass(tipo, np.bool_)
def is_extension_type(arr):
"""
Check whether an array-like is of a pandas extension class instance.
Extension classes include categoricals, pandas sparse objects (i.e.
classes represented within the pandas library and not ones external
to it like scipy sparse matrices), and datetime-like arrays.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean : Whether or not the array-like is of a pandas
extension class instance.
Examples
--------
>>> is_extension_type([1, 2, 3])
False
>>> is_extension_type(np.array([1, 2, 3]))
False
>>>
>>> cat = pd.Categorical([1, 2, 3])
>>>
>>> is_extension_type(cat)
True
>>> is_extension_type(pd.Series(cat))
True
>>> is_extension_type(pd.SparseArray([1, 2, 3]))
True
>>> is_extension_type(pd.SparseSeries([1, 2, 3]))
True
>>>
>>> from scipy.sparse import bsr_matrix
>>> is_extension_type(bsr_matrix([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>>
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_extension_type(s)
True
"""
if is_categorical(arr):
return True
elif is_sparse(arr):
return True
elif is_datetimetz(arr):
return True
return False
def is_complex_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a complex dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean : Whether or not the array or dtype is of a complex dtype.
Examples
--------
>>> is_complex_dtype(str)
False
>>> is_complex_dtype(int)
False
>>> is_complex_dtype(np.complex)
True
>>> is_complex_dtype(np.array(['a', 'b']))
False
>>> is_complex_dtype(pd.Series([1, 2]))
False
>>> is_complex_dtype(np.array([1 + 1j, 5]))
True
"""
if arr_or_dtype is None:
return False
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.complexfloating)
def _coerce_to_dtype(dtype):
"""
Coerce a string or np.dtype to a pandas or numpy
dtype if possible.
If we cannot convert to a pandas dtype initially,
we convert to a numpy dtype.
Parameters
----------
dtype : The dtype that we want to coerce.
Returns
-------
pd_or_np_dtype : The coerced dtype.
"""
if is_categorical_dtype(dtype):
dtype = CategoricalDtype()
elif is_datetime64tz_dtype(dtype):
dtype = DatetimeTZDtype(dtype)
elif is_period_dtype(dtype):
dtype = PeriodDtype(dtype)
elif is_interval_dtype(dtype):
dtype = IntervalDtype(dtype)
else:
dtype = np.dtype(dtype)
return dtype
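# Illustrative sketch (added for exposition, not part of the original
# module): strings naming a pandas extension dtype coerce to the matching
# pandas dtype object, while anything else falls through to ``np.dtype``.
#
# _coerce_to_dtype('category') -> CategoricalDtype instance
# _coerce_to_dtype('datetime64[ns, US/Eastern]') -> DatetimeTZDtype instance
# _coerce_to_dtype('int64') -> np.dtype('int64')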
def _get_dtype(arr_or_dtype):
"""
Get the dtype instance associated with an array
or dtype object.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype object whose dtype we want to extract.
Returns
-------
obj_dtype : The extracted dtype instance from the
passed in array or dtype object.
Raises
------
TypeError : The passed in object is None.
"""
if arr_or_dtype is None:
raise TypeError("Cannot deduce dtype from null object")
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype)
elif isinstance(arr_or_dtype, CategoricalDtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, DatetimeTZDtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, PeriodDtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, IntervalDtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, string_types):
if is_categorical_dtype(arr_or_dtype):
return CategoricalDtype.construct_from_string(arr_or_dtype)
elif is_datetime64tz_dtype(arr_or_dtype):
return DatetimeTZDtype.construct_from_string(arr_or_dtype)
elif is_period_dtype(arr_or_dtype):
return PeriodDtype.construct_from_string(arr_or_dtype)
elif is_interval_dtype(arr_or_dtype):
return IntervalDtype.construct_from_string(arr_or_dtype)
if hasattr(arr_or_dtype, 'dtype'):
arr_or_dtype = arr_or_dtype.dtype
return np.dtype(arr_or_dtype)
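# Illustrative sketch (added for exposition): the helper accepts dtype
# instances, scalar types, dtype strings and array-likes, and hands back a
# dtype instance (numpy or pandas extension); None raises TypeError.
#
# _get_dtype(np.int64) -> np.dtype('int64')
# _get_dtype(np.array([1.0, 2.0])) -> np.dtype('float64')
# _get_dtype('datetime64[ns, UTC]') -> DatetimeTZDtype instance
# _get_dtype(None) -> raises TypeError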
def _get_dtype_type(arr_or_dtype):
"""
Get the type (NOT dtype) instance associated with
an array or dtype object.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype object whose type we want to extract.
Returns
-------
obj_type : The extracted type instance from the
passed in array or dtype object.
"""
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype.type
elif isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype).type
elif isinstance(arr_or_dtype, CategoricalDtype):
return CategoricalDtypeType
elif isinstance(arr_or_dtype, DatetimeTZDtype):
return DatetimeTZDtypeType
elif isinstance(arr_or_dtype, IntervalDtype):
return IntervalDtypeType
elif isinstance(arr_or_dtype, PeriodDtype):
return PeriodDtypeType
elif isinstance(arr_or_dtype, string_types):
if is_categorical_dtype(arr_or_dtype):
return CategoricalDtypeType
elif is_datetime64tz_dtype(arr_or_dtype):
return DatetimeTZDtypeType
elif is_period_dtype(arr_or_dtype):
return PeriodDtypeType
elif is_interval_dtype(arr_or_dtype):
return IntervalDtypeType
return _get_dtype_type(np.dtype(arr_or_dtype))
try:
return arr_or_dtype.dtype.type
except AttributeError:
return type(None)
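# Illustrative sketch (added for exposition): unlike ``_get_dtype`` this
# returns the scalar *type* behind a dtype, which is what the ``issubclass``
# checks in the ``is_*_dtype`` helpers above operate on.
#
# _get_dtype_type(np.dtype('int64')) -> np.int64
# _get_dtype_type(np.array(['a', 'b'])) -> np.str_ on Python 3 (np.string_ on Python 2)
# _get_dtype_type('category') -> CategoricalDtypeType
# _get_dtype_type(object()) -> type(None) (no .dtype attribute)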
def _get_dtype_from_object(dtype):
"""
Get a numpy dtype.type-style object for a dtype object.
This method also includes handling of the datetime64[ns] and
datetime64[ns, TZ] objects.
If no dtype can be found, we return ``object``.
Parameters
----------
dtype : dtype, type
The dtype object whose numpy dtype.type-style
object we want to extract.
Returns
-------
dtype_object : The extracted numpy dtype.type-style object.
"""
if isinstance(dtype, type) and issubclass(dtype, np.generic):
# Type object from a dtype
return dtype
elif is_categorical(dtype):
return CategoricalDtype().type
elif is_datetimetz(dtype):
return DatetimeTZDtype(dtype).type
elif isinstance(dtype, np.dtype): # dtype object
try:
_validate_date_like_dtype(dtype)
except TypeError:
# Should still pass if we don't have a date-like
pass
return dtype.type
elif isinstance(dtype, string_types):
if dtype in ['datetimetz', 'datetime64tz']:
return DatetimeTZDtype.type
elif dtype in ['period']:
raise NotImplementedError
if dtype == 'datetime' or dtype == 'timedelta':
dtype += '64'
try:
return _get_dtype_from_object(getattr(np, dtype))
except (AttributeError, TypeError):
# Handles cases like _get_dtype(int) i.e.,
# Python objects that are valid dtypes
# (unlike user-defined types, in general)
#
# TypeError handles the float16 type code of 'e'
# further handle internal types
pass
return _get_dtype_from_object(np.dtype(dtype))
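# Illustrative sketch (added for exposition): this normalises the many ways
# of spelling a dtype down to a numpy scalar type (falling back to
# ``object``), e.g. when columns are filtered by dtype.
#
# _get_dtype_from_object(np.float64) -> np.float64 (already a scalar type)
# _get_dtype_from_object('float64') -> np.float64
# _get_dtype_from_object('datetime') -> np.datetime64 ('64' is appended first)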
def _validate_date_like_dtype(dtype):
"""
Check whether the dtype is a date-like dtype. Raises an error if invalid.
Parameters
----------
dtype : dtype, type
The dtype to check.
Raises
------
TypeError : The dtype could not be cast to a date-like dtype.
ValueError : The dtype is an illegal date-like dtype (e.g. the
frequency provided is too specific).
"""
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
raise TypeError('%s' % e)
if typ != 'generic' and typ != 'ns':
raise ValueError('%r is too specific of a frequency, try passing %r' %
(dtype.name, dtype.type.__name__))
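# Illustrative sketch (added for exposition): only the 'generic' and 'ns'
# datetime resolutions pass silently; a non-date-like dtype surfaces as
# TypeError (numpy's ValueError is re-raised), any other resolution as
# ValueError.
#
# _validate_date_like_dtype(np.dtype('datetime64[ns]')) -> returns None (ok)
# _validate_date_like_dtype(np.dtype('datetime64[D]')) -> raises ValueError
# _validate_date_like_dtype(np.dtype('int64')) -> raises TypeError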
_string_dtypes = frozenset(map(_get_dtype_from_object, (binary_type,
text_type)))
def pandas_dtype(dtype):
"""
Converts input into a pandas only dtype object or a numpy dtype object.
Parameters
----------
dtype : object to be converted
Returns
-------
np.dtype or a pandas dtype
"""
if isinstance(dtype, DatetimeTZDtype):
return dtype
elif isinstance(dtype, PeriodDtype):
return dtype
elif isinstance(dtype, CategoricalDtype):
return dtype
elif isinstance(dtype, IntervalDtype):
return dtype
elif isinstance(dtype, string_types):
try:
return DatetimeTZDtype.construct_from_string(dtype)
except TypeError:
pass
if dtype.startswith('period[') or dtype.startswith('Period['):
# do not parse string like U as period[U]
try:
return PeriodDtype.construct_from_string(dtype)
except TypeError:
pass
elif dtype.startswith('interval[') or dtype.startswith('Interval['):
try:
return IntervalDtype.construct_from_string(dtype)
except TypeError:
pass
try:
return CategoricalDtype.construct_from_string(dtype)
except TypeError:
pass
elif isinstance(dtype, ExtensionDtype):
return dtype
try:
npdtype = np.dtype(dtype)
except (TypeError, ValueError):
raise
# Any invalid dtype (such as pd.Timestamp) should raise an error.
# np.dtype(invalid_type).kind = 0 for such objects. However, this will
# also catch some valid dtypes such as object, np.object_ and 'object'
# which we safeguard against by catching them earlier and returning
# np.dtype(valid_dtype) before this condition is evaluated.
if dtype in [object, np.object_, 'object', 'O']:
return npdtype
elif npdtype.kind == 'O':
raise TypeError('dtype {0} not understood'.format(dtype))
return npdtype
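# Illustrative sketch (added for exposition): ``pandas_dtype`` is the public
# entry point -- pandas extension dtypes (categorical, datetime-with-tz,
# period, interval) come back as pandas dtype objects, everything else as a
# numpy dtype, and unrecognised objects raise TypeError.
#
# pandas_dtype('int64') -> np.dtype('int64')
# pandas_dtype('category') -> CategoricalDtype instance
# pandas_dtype('datetime64[ns, US/Eastern]') -> DatetimeTZDtype instance
# pandas_dtype(object) -> np.dtype('O')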
| mit |
CforED/Machine-Learning | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
dtusar/coco | code-postprocessing/bbob_pproc/ppfigdim.py | 3 | 24511 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Generate performance scaling figures.
The figures show the scaling of the performance in terms of ERT w.r.t.
dimensionality on a log-log scale. On the y-axis, data is represented as
a number of function evaluations divided by dimension, this is in order
to compare at a glance with a linear scaling for which ERT is
proportional to the dimension and would therefore be represented by a
horizontal line in the figure.
Crosses (+) give the median number of function evaluations of successful
trials divided by dimension for the smallest *reached* target function
value.
Numbers indicate the number of successful runs for the smallest
*reached* target.
If the smallest target function value (1e-8) is not reached for a given
dimension, crosses (x) give the average number of overall conducted
function evaluations divided by the dimension.
Horizontal lines indicate linear scaling with the dimension, additional
grid lines show quadratic and cubic scaling.
The thick light line with diamond markers shows the single best results
from BBOB-2009 for df = 1e-8.
**Example**
.. plot::
:width: 50%
import urllib
import tarfile
import glob
from pylab import *
import bbob_pproc as bb
# Collect and unarchive data (3.4MB)
dataurl = 'http://coco.lri.fr/BBOB2009/pythondata/BIPOP-CMA-ES.tar.gz'
filename, headers = urllib.urlretrieve(dataurl)
archivefile = tarfile.open(filename)
archivefile.extractall()
# Scaling figure
ds = bb.load(glob.glob('BBOB2009pythondata/BIPOP-CMA-ES/ppdata_f002_*.pickle'))
figure()
bb.ppfigdim.plot(ds)
bb.ppfigdim.beautify()
bb.ppfigdim.plot_previous_algorithms(2, False) # plot BBOB 2009 best algorithm on fun 2
"""
from __future__ import absolute_import
import os
import warnings
import matplotlib.pyplot as plt
import numpy as np
from pdb import set_trace
from . import genericsettings, toolsstats, bestalg, pproc, ppfig, ppfigparam
values_of_interest = pproc.TargetValues((10, 1, 1e-1, 1e-2, 1e-3, 1e-5, 1e-8)) # to rename!?
xlim_max = None
ynormalize_by_dimension = True # not at all tested yet
styles = [ # sort of rainbow style, most difficult (red) first
{'color': 'r', 'marker': 'o', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
{'color': 'm', 'marker': '.', 'linewidth': 4},
{'color': 'y', 'marker': '^', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
{'color': 'g', 'marker': '.', 'linewidth': 4},
{'color': 'c', 'marker': 'v', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
{'color': 'b', 'marker': '.', 'linewidth': 4},
{'color': 'k', 'marker': 'o', 'markeredgecolor': 'k', 'markeredgewidth': 2, 'linewidth': 4},
]
refcolor = 'wheat'
caption_part_one = r"""%
Expected number of $f$-evaluations (\ERT, lines) to reach $\fopt+\Df$;
median number of $f$-evaluations (+) to reach the most difficult
target that was reached not always but at least once; maximum number of
$f$-evaluations in any trial ({\color{red}$\times$}); """ + (r"""interquartile
range with median (notched boxes) of simulated runlengths
to reach $\fopt+\Df$;""" if genericsettings.scaling_figures_with_boxes
else "") + """ all values are """ + ("""divided by dimension and """ if ynormalize_by_dimension else "") + """
plotted as $\log_{10}$ values versus dimension. %
"""
#""" .replace('REPLACE_THIS', r"interquartile range with median (notched boxes) of simulated runlengths to reach $\fopt+\Df$;"
# if genericsettings.scaling_figures_with_boxes else '')
# # r"(the exponent is given in the legend of #1). " +
# "For each function and dimension, $\\ERT(\\Df)$ equals to $\\nbFEs(\\Df)$ " +
# "divided by the number of successful trials, where a trial is " +
# "successful if $\\fopt+\\Df$ was surpassed. The " +
# "$\\nbFEs(\\Df)$ are the total number (the sum) of $f$-evaluations while " +
# "$\\fopt+\\Df$ was not surpassed in a trial, from all " +
# "(successful and unsuccessful) trials, and \\fopt\\ is the optimal " +
# "function value. " +
scaling_figure_caption_fixed = caption_part_one + r"""%
Shown are $\Df = 10^{\{values_of_interest\}}$.
Numbers above \ERT-symbols (if appearing) indicate the number of trials reaching the respective target.
The light thick line with diamonds indicates the respective best result from BBOB-2009 for $\Df=10^{-8}$.
Horizontal lines mean linear scaling, slanted grid lines depict quadratic scaling.
"""
scaling_figure_caption_rlbased = caption_part_one + r"""%
Shown is the \ERT\ for
targets just not reached by
% the largest $\Df$-values $\ge10^{-8}$ for which the \ERT\ of
the artificial GECCO-BBOB-2009 best algorithm
within the given budget $k\times\DIM$, where $k$ is shown in the legend.
% was above $\{values_of_interest\}\times\DIM$ evaluations.
Numbers above \ERT-symbols indicate the number of trials reaching the respective target.
The light thick line with diamonds indicates the respective best result from BBOB-2009 for
the most difficult target.
Slanted grid lines indicate a scaling with ${\cal O}(\DIM)$ compared to ${\cal O}(1)$
when using the respective 2009 best algorithm.
"""
# r"Shown is the \ERT\ for the smallest $\Df$-values $\ge10^{-8}$ for which the \ERT\ of the GECCO-BBOB-2009 best algorithm " +
# r"was below $10^{\{values_of_interest\}}\times\DIM$ evaluations. " +
# should correspond with the colors in pprldistr.
dimensions = genericsettings.dimensions_to_display
functions_with_legend = (1, 24, 101, 130)
def scaling_figure_caption():
if genericsettings.runlength_based_targets:
return scaling_figure_caption_rlbased.replace('values_of_interest',
', '.join(values_of_interest.labels()))
else:
return scaling_figure_caption_fixed.replace('values_of_interest',
', '.join(values_of_interest.loglabels()))
def beautify(axesLabel=True):
"""Customize figure presentation.
Uses information from :file:`benchmarkshortinfos.txt` for figure
title.
"""
# Input checking
# Get axis handle and set scale for each axis
axisHandle = plt.gca()
axisHandle.set_xscale("log")
axisHandle.set_yscale("log")
# Grid options
axisHandle.xaxis.grid(False, which='major')
# axisHandle.grid(True, which='major')
axisHandle.grid(False, which='minor')
# axisHandle.xaxis.grid(True, linewidth=0, which='major')
ymin, ymax = plt.ylim()
# horizontal grid
if isinstance(values_of_interest, pproc.RunlengthBasedTargetValues):
axisHandle.yaxis.grid(False, which='major')
expon = values_of_interest.times_dimension - ynormalize_by_dimension
for (i, y) in enumerate(reversed(values_of_interest.run_lengths)):
plt.plot((1, 200), [y, y], 'k:', linewidth=0.2)
if i / 2. == i // 2:
plt.plot((1, 200), [y, y * 200**expon],
styles[i]['color'] + '-', linewidth=0.2)
else:
# TODO: none of this is visible in svg format!
axisHandle.yaxis.grid(True, which='major')
# for i in xrange(0, 11):
# plt.plot((0.2, 20000), 2 * [10**i], 'k:', linewidth=0.5)
# quadratic slanted "grid"
for i in xrange(-2, 7, 1 if ymax < 1e5 else 2):
plt.plot((0.2, 20000), (10**i, 10**(i + 5)), 'k:', linewidth=0.5)
# TODO: this should be done before the real lines are plotted?
# for x in dimensions:
# plt.plot(2 * [x], [0.1, 1e11], 'k:', linewidth=0.5)
# Ticks on axes
# axisHandle.invert_xaxis()
dimticklist = dimensions
dimannlist = dimensions
# TODO: All these should depend on one given input (xlim, ylim)
axisHandle.set_xticks(dimticklist)
axisHandle.set_xticklabels([str(n) for n in dimannlist])
logyend = 11 # int(1 + np.log10(plt.ylim()[1]))
axisHandle.set_yticks([10.**i for i in xrange(0, logyend)])
axisHandle.set_yticklabels(range(0, logyend))
if 11 < 3:
tmp = axisHandle.get_yticks()
tmp2 = []
for i in tmp:
tmp2.append('%d' % round(np.log10(i)))
axisHandle.set_yticklabels(tmp2)
if 11 < 3:
# ticklabels = 10**np.arange(int(np.log10(plt.ylim()[0])), int(np.log10(1 + plt.ylim()[1])))
ticks = []
for i in xrange(int(np.log10(plt.ylim()[0])), int(np.log10(1 + plt.ylim()[1]))):
ticks += [10 ** i, 2 * 10 ** i, 5 * 10 ** i]
axisHandle.set_yticks(ticks)
# axisHandle.set_yticklabels(ticklabels)
# axes limites
plt.xlim(0.9 * dimensions[0], 1.125 * dimensions[-1])
if xlim_max is not None:
if isinstance(values_of_interest, pproc.RunlengthBasedTargetValues):
plt.ylim(0.3, xlim_max) # set in config
else:
# pass # TODO: xlim_max seems to be not None even when not desired
plt.ylim(ymax=min([plt.ylim()[1], xlim_max]))
plt.ylim(ppfig.discretize_limits((ymin, ymax)))
if 11 < 3:
title = plt.gca().get_title() # does not work as expected
if title.startswith('1 ') or title.startswith('5 '):
plt.ylim(0.5, 1e2)
if title.startswith('19 ') or title.startswith('20 '):
plt.ylim(0.5, 1e4)
if axesLabel:
plt.xlabel('Dimension')
if ynormalize_by_dimension:
plt.ylabel('Run Lengths / Dimension')
else:
plt.ylabel('Run Lengths')
def generateData(dataSet, targetFuncValue):
"""Computes an array of results to be plotted.
:returns: (ert, success rate, number of success, total number of
function evaluations, median of successful runs).
"""
it = iter(reversed(dataSet.evals))
i = it.next()
prev = np.array([np.nan] * len(i))
while i[0] <= targetFuncValue:
prev = i
try:
i = it.next()
except StopIteration:
break
data = prev[1:].copy() # keep only the number of function evaluations.
succ = (np.isnan(data) == False)
if succ.any():
med = toolsstats.prctile(data[succ], 50)[0]
# Line above was modified at rev 3050 to make sure that we consider only
# successful trials in the median
else:
med = np.nan
data[np.isnan(data)] = dataSet.maxevals[np.isnan(data)]
res = []
res.extend(toolsstats.sp(data, issuccessful=succ, allowinf=False))
res.append(np.mean(data)) # mean(FE)
res.append(med)
return np.array(res)
def plot_a_bar(x, y,
plot_cmd=plt.loglog,
rec_width=0.1, # box ("rectangle") width, log scale
rec_taille_fac=0.3, # notch width parameter
styles={'color': 'b'},
linewidth=1,
fill_color=None, # None means no fill
fill_transparency=0.7 # 1 should be invisible
):
"""plot/draw a notched error bar, x is the x-position,
y[0,1,2] are lower, median and upper percentile respectively.
hold(True) to see everything.
TODO: with linewidth=0, inf is not visible
"""
if not np.isfinite(y[2]):
y[2] = y[1] + 100 * (y[1] - y[0])
if plot_cmd in (plt.loglog, plt.semilogy):
y[2] = (1 + y[1]) * (1 + y[1] / y[0])**10
if not np.isfinite(y[0]):
y[0] = y[1] - 100 * (y[2] - y[1])
if plot_cmd in (plt.loglog, plt.semilogy):
y[0] = y[1] / (1 + y[2] / y[1])**10
styles2 = {}
for s in styles:
styles2[s] = styles[s]
styles2['linewidth'] = linewidth
styles2['markeredgecolor'] = styles2['color']
dim = 1 # to remove error
x0 = x
if plot_cmd in (plt.loglog, plt.semilogx):
r = np.exp(rec_width) # ** ((1. + i_target / 3.) / 4) # more difficult targets get a wider box
x = [x0 * dim / r, x0 * r * dim] # assumes log-scale of x-axis
xm = [x0 * dim / (r**rec_taille_fac), x0 * dim * (r**rec_taille_fac)]
else:
r = rec_width
x = [x0 * dim - r, x0 * dim + r]
xm = [x0 * dim - (r * rec_taille_fac), x0 * dim + (r * rec_taille_fac)]
y = np.array(y) / dim
if fill_color is not None:
plt.fill_between([x[0], xm[0], x[0], x[1], xm[1], x[1], x[0]],
[y[0], y[1], y[2], y[2], y[1], y[0], y[0]],
color=fill_color, alpha=1-fill_transparency)
plot_cmd([x[0], xm[0], x[0], x[1], xm[1], x[1], x[0]],
[y[0], y[1], y[2], y[2], y[1], y[0], y[0]],
markersize=0, **styles2)
styles2['linewidth'] = 0
plot_cmd([x[0], x[1], x[1], x[0], x[0]],
[y[0], y[0], y[2], y[2], y[0]],
**styles2)
styles2['linewidth'] = 2 # median
plot_cmd([x[0], x[1]], [y[1], y[1]],
markersize=0, **styles2)
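# Illustrative usage sketch (added for exposition, not part of the module;
# the numbers and the style dictionary are made up): draw one notched bar at
# x=10 whose lower/median/upper simulated runlengths are 1e2/1e3/1e4 on
# log-log axes.
#
# plt.figure()
# plot_a_bar(10, [1e2, 1e3, 1e4], plot_cmd=plt.loglog,
# styles={'color': 'g'}, fill_color='g')
# plt.show()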
def plot(dsList, valuesOfInterest=values_of_interest, styles=styles):
"""From a DataSetList, plot a figure of ERT/dim vs dim.
There will be one set of graphs per function represented in the
input data sets. Most usually the data sets of different functions
will be represented separately.
:param DataSetList dsList: data sets
:param seq valuesOfInterest:
target precisions via class TargetValues, there might
be as many graphs as there are elements in
this input. Can be different for each
function (a dictionary indexed by ifun).
:returns: handles
"""
valuesOfInterest = pproc.TargetValues.cast(valuesOfInterest)
styles = list(reversed(styles[:len(valuesOfInterest)]))
dsList = pproc.DataSetList(dsList)
dictFunc = dsList.dictByFunc()
res = []
for func in dictFunc:
dictFunc[func] = dictFunc[func].dictByDim()
dimensions = sorted(dictFunc[func])
# legend = []
line = []
mediandata = {}
displaynumber = {}
for i_target in range(len(valuesOfInterest)):
succ = []
unsucc = []
# data = []
maxevals = np.ones(len(dimensions))
maxevals_succ = np.ones(len(dimensions))
# Collect data that have the same function and different dimension.
for idim, dim in enumerate(dimensions):
assert len(dictFunc[func][dim]) == 1
# (ert, success rate, number of success, total number of
# function evaluations, median of successful runs)
tmp = generateData(dictFunc[func][dim][0], valuesOfInterest((func, dim))[i_target])
maxevals[idim] = max(dictFunc[func][dim][0].maxevals)
# data.append(np.append(dim, tmp))
if tmp[2] > 0: # Number of success is larger than 0
succ.append(np.append(dim, tmp))
if tmp[2] < dictFunc[func][dim][0].nbRuns():
displaynumber[dim] = ((dim, tmp[0], tmp[2]))
mediandata[dim] = (i_target, tmp[-1])
unsucc.append(np.append(dim, np.nan))
else:
unsucc.append(np.append(dim, tmp[-2])) # total number of fevals
if len(succ) > 0:
tmp = np.vstack(succ)
# ERT
if genericsettings.scaling_figures_with_boxes:
for dim in dimensions:
# to find finite simulated runlengths we need to have at least one successful run
if dictFunc[func][dim][0].detSuccesses([valuesOfInterest((func, dim))[i_target]])[0]:
# make a box-plot
y = toolsstats.drawSP_from_dataset(
dictFunc[func][dim][0],
valuesOfInterest((func, dim))[i_target],
[25, 50, 75],
genericsettings.simulated_runlength_bootstrap_sample_size)[0]
rec_width = 1.1 # box ("rectangle") width
rec_taille_fac = 0.3 # notch width parameter
r = rec_width ** ((1. + i_target / 3.) / 4) # more difficult targets get a wider box
styles2 = {}
for s in styles[i_target]:
styles2[s] = styles[i_target][s]
styles2['linewidth'] = 1
styles2['markeredgecolor'] = styles2['color']
x = [dim / r, r * dim]
xm = [dim / (r**rec_taille_fac), dim * (r**rec_taille_fac)]
y = np.array(y) / dim
plt.plot([x[0], xm[0], x[0], x[1], xm[1], x[1], x[0]],
[y[0], y[1], y[2], y[2], y[1], y[0], y[0]],
markersize=0, **styles2)
styles2['linewidth'] = 0
plt.plot([x[0], x[1], x[1], x[0], x[0]],
[y[0], y[0], y[2], y[2], y[0]],
**styles2)
styles2['linewidth'] = 2 # median
plt.plot([x[0], x[1]], [y[1], y[1]],
markersize=0, **styles2)
# plot lines, we have to be smart to connect only adjacent dimensions
for i, n in enumerate(tmp[:, 0]):
j = list(dimensions).index(n)
if i == len(tmp[:, 0]) - 1 or j == len(dimensions) - 1:
break
if dimensions[j+1] == tmp[i+1, 0]:
res.extend(plt.plot(tmp[i:i+2, 0], tmp[i:i+2, 1] / tmp[i:i+2, 0]**ynormalize_by_dimension,
markersize=0, clip_on=True, **styles[i_target]))
# plot only marker
lw = styles[i_target].get('linewidth', None)
styles[i_target]['linewidth'] = 0
res.extend(plt.plot(tmp[:, 0], tmp[:, 1] / tmp[:, 0]**ynormalize_by_dimension,
markersize=20, clip_on=True, **styles[i_target]))
# restore linewidth
if lw:
styles[i_target]['linewidth'] = lw
else:
del styles[i_target]['linewidth']
# To have the legend displayed whatever happens with the data.
for i in reversed(range(len(valuesOfInterest))):
res.extend(plt.plot([], [], markersize=10,
label=valuesOfInterest.label(i) if isinstance(valuesOfInterest, pproc.RunlengthBasedTargetValues) else valuesOfInterest.loglabel(i),
**styles[i]))
# Only for the last target function value
if unsucc: # obsolete
tmp = np.vstack(unsucc) # tmp[:, 0] needs to be sorted!
# res.extend(plt.plot(tmp[:, 0], tmp[:, 1]/tmp[:, 0],
# color=styles[len(valuesOfInterest)-1]['color'],
# marker='x', markersize=20))
if 1 < 3: # maxevals
ylim = plt.ylim()
res.extend(plt.plot(tmp[:, 0], maxevals / tmp[:, 0]**ynormalize_by_dimension,
color=styles[len(valuesOfInterest) - 1]['color'],
ls='', marker='x', markersize=20))
plt.ylim(ylim)
# median
if mediandata:
# for i, tm in mediandata.iteritems():
for i in displaynumber: # display median where success prob is smaller than one
tm = mediandata[i]
plt.plot((i,), (tm[1] / i**ynormalize_by_dimension,),
color=styles[tm[0]]['color'],
linestyle='', marker='+', markersize=30,
markeredgewidth=5, zorder= -1)
a = plt.gca()
# the displaynumber is emptied for each new target precision
# therefore the displaynumber displayed below correspond to the
# last target (must be the hardest)
if displaynumber: # displayed only for the smallest valuesOfInterest
for _k, j in displaynumber.iteritems():
# the 1.5 factor is a shift up for the digits
plt.text(j[0], 1.5 * j[1] / j[0]**ynormalize_by_dimension,
"%.0f" % j[2], axes=a,
horizontalalignment="center",
verticalalignment="bottom",
fontsize=plt.rcParams['font.size'] * 0.85)
# if later the ylim[0] becomes >> 1, this might be a problem
return res
def plot_previous_algorithms(func, isBiobjective, target=values_of_interest): # lambda x: [1e-8]):
"""Add graph of the BBOB-2009 virtual best algorithm using the
last, most difficult target in ``target``."""
target = pproc.TargetValues.cast(target)
bestalgentries = bestalg.loadBestAlgorithm(isBiobjective)
bestalgdata = []
for d in dimensions:
try:
entry = bestalgentries[(d, func)]
tmp = entry.detERT([target((func, d))[-1]])[0]
if not np.isinf(tmp):
bestalgdata.append(tmp / d)
else:
bestalgdata.append(np.inf)
except KeyError: # dimension not in bestalg
bestalgdata.append(np.inf) # None/nan give a runtime warning
res = plt.plot(dimensions, bestalgdata, color=refcolor, linewidth=10,
marker='d', markersize=25, markeredgecolor='k',
zorder= -2)
return res
def main(dsList, _valuesOfInterest, outputdir, verbose=True):
"""From a DataSetList, returns a convergence and ERT/dim figure vs dim.
Uses data of BBOB 2009 (:py:mod:`bbob_pproc.bestalg`).
:param DataSetList dsList: data sets
:param seq _valuesOfInterest: target precisions, either as list or as
``pproc.TargetValues`` class instance.
There will be as many graphs as there are
elements in this input.
:param string outputdir: output directory
:param bool verbose: controls verbosity
"""
# plt.rc("axes", labelsize=20, titlesize=24)
# plt.rc("xtick", labelsize=20)
# plt.rc("ytick", labelsize=20)
# plt.rc("font", size=20)
# plt.rc("legend", fontsize=20)
_valuesOfInterest = pproc.TargetValues.cast(_valuesOfInterest)
bestalg.loadBestAlgorithm(dsList.isBiobjective())
dictFunc = dsList.dictByFunc()
ppfig.save_single_functions_html(os.path.join(outputdir, genericsettings.single_algorithm_file_name),
dictFunc[dictFunc.keys()[0]][0].algId,
algorithmCount=ppfig.AlgorithmCount.ONE,
values_of_interest = values_of_interest)
ppfig.copy_js_files(outputdir)
funInfos = ppfigparam.read_fun_infos(dsList.isBiobjective())
for func in dictFunc:
plot(dictFunc[func], _valuesOfInterest, styles=styles) # styles might have changed via config
beautify(axesLabel=False)
plt.text(plt.xlim()[0], plt.ylim()[0],
_valuesOfInterest.short_info, fontsize=14)
if func in functions_with_legend:
plt.legend(loc="best")
if func in funInfos.keys():
# print(plt.rcParams['axes.titlesize'])
# print(plt.rcParams['font.size'])
funcName = funInfos[func]
fontSize = 24 - max(0, 4 * ((len(funcName) - 35) / 5))
plt.gca().set_title(funcName, fontsize=fontSize) # 24 is global font.size
plot_previous_algorithms(func, dsList.isBiobjective(), _valuesOfInterest)
filename = os.path.join(outputdir, 'ppfigdim_f%03d' % (func))
with warnings.catch_warnings(record=True) as ws:
ppfig.saveFigure(filename, verbose=verbose)
if len(ws):
for w in ws:
print(w)
print('while saving figure in "' + filename +
'" (in ppfigdim.py:551)')
plt.close()
| bsd-3-clause |
mwv/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
# Test FactorAnalysis ability to recover the data covariance structure
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
# latent variable of dim 3, 20 of it
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
assert_raises(ValueError, FactorAnalysis, svd_method='foo')
fa_fail = FactorAnalysis()
fa_fail.svd_method = 'foo'
assert_raises(ValueError, fa_fail.fit, X)
fas = []
for method in ['randomized', 'lapack']:
fa = FactorAnalysis(n_components=n_components, svd_method=method)
fa.fit(X)
fas.append(fa)
X_t = fa.transform(X)
assert_equal(X_t.shape, (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
diff = np.all(np.diff(fa.loglike_))
assert_greater(diff, 0., 'Log likelihood did not increase')
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
f = lambda x, y: np.abs(getattr(x, y)) # sign will not be equal
fa1, fa2 = fas
for attr in ['loglike_', 'components_', 'noise_variance_']:
assert_almost_equal(f(fa1, attr), f(fa2, attr))
fa1.max_iter = 1
fa1.verbose = True
assert_warns(ConvergenceWarning, fa1.fit, X)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
fa.n_components = n_components
fa.fit(X)
cov = fa.get_covariance()
precision = fa.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
| bsd-3-clause |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/core/ops.py | 9 | 48430 | """
Arithmetic operations for PandasObjects
This is not a public API.
"""
# necessary to enforce truediv in Python 2.X
from __future__ import division
import operator
import warnings
import numpy as np
import pandas as pd
import datetime
from pandas import compat, lib, tslib
import pandas.index as _index
from pandas.util.decorators import Appender
import pandas.core.common as com
import pandas.computation.expressions as expressions
from pandas.lib import isscalar
from pandas.tslib import iNaT
from pandas.compat import bind_method
from pandas.core.common import(is_list_like, notnull, isnull,
_values_from_object, _maybe_match_name,
needs_i8_conversion, is_datetimelike_v_numeric,
is_integer_dtype, is_categorical_dtype, is_object_dtype,
is_timedelta64_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_bool_dtype)
from pandas.io.common import PerformanceWarning
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
# methods
def _create_methods(arith_method, radd_func, comp_method, bool_method,
use_numexpr, special=False, default_axis='columns'):
# creates actual methods based upon arithmetic, comp and bool method
# constructors.
# NOTE: Only frame cares about default_axis, specifically: special methods
# have default axis None, whereas flex methods have default axis 'columns'
# if we're not using numexpr, then don't pass a str_rep
if use_numexpr:
op = lambda x: x
else:
op = lambda x: None
if special:
def names(x):
if x[-1] == "_":
return "__%s_" % x
else:
return "__%s__" % x
else:
names = lambda x: x
radd_func = radd_func or operator.add
# In frame, all special methods have default_axis=None, flex methods have
# default_axis set to the default (columns)
new_methods = dict(
add=arith_method(operator.add, names('add'), op('+'),
default_axis=default_axis),
radd=arith_method(radd_func, names('radd'), op('+'),
default_axis=default_axis),
sub=arith_method(operator.sub, names('sub'), op('-'),
default_axis=default_axis),
mul=arith_method(operator.mul, names('mul'), op('*'),
default_axis=default_axis),
truediv=arith_method(operator.truediv, names('truediv'), op('/'),
truediv=True, fill_zeros=np.inf,
default_axis=default_axis),
floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'),
default_axis=default_axis, fill_zeros=np.inf),
# Causes a floating point exception in the tests when numexpr
# enabled, so for now no speedup
mod=arith_method(operator.mod, names('mod'), None,
default_axis=default_axis, fill_zeros=np.nan),
pow=arith_method(operator.pow, names('pow'), op('**'),
default_axis=default_axis),
# not entirely sure why this is necessary, but previously was included
# so it's here to maintain compatibility
rmul=arith_method(operator.mul, names('rmul'), op('*'),
default_axis=default_axis, reversed=True),
rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'),
default_axis=default_axis, reversed=True),
rtruediv=arith_method(lambda x, y: operator.truediv(y, x),
names('rtruediv'), op('/'), truediv=True,
fill_zeros=np.inf, default_axis=default_axis,
reversed=True),
rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x),
names('rfloordiv'), op('//'),
default_axis=default_axis, fill_zeros=np.inf,
reversed=True),
rpow=arith_method(lambda x, y: y ** x, names('rpow'), op('**'),
default_axis=default_axis, reversed=True),
rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'),
default_axis=default_axis, fill_zeros=np.nan,
reversed=True),
)
new_methods['div'] = new_methods['truediv']
new_methods['rdiv'] = new_methods['rtruediv']
# Comp methods never had a default axis set
if comp_method:
new_methods.update(dict(
eq=comp_method(operator.eq, names('eq'), op('==')),
ne=comp_method(operator.ne, names('ne'), op('!='), masker=True),
lt=comp_method(operator.lt, names('lt'), op('<')),
gt=comp_method(operator.gt, names('gt'), op('>')),
le=comp_method(operator.le, names('le'), op('<=')),
ge=comp_method(operator.ge, names('ge'), op('>=')),
))
if bool_method:
new_methods.update(dict(
and_=bool_method(operator.and_, names('and_'), op('&')),
or_=bool_method(operator.or_, names('or_'), op('|')),
# For some reason ``^`` wasn't used in original.
xor=bool_method(operator.xor, names('xor'), op('^')),
rand_=bool_method(lambda x, y: operator.and_(y, x),
names('rand_'), op('&')),
ror_=bool_method(lambda x, y: operator.or_(y, x), names('ror_'), op('|')),
rxor=bool_method(lambda x, y: operator.xor(y, x), names('rxor'), op('^'))
))
new_methods = dict((names(k), v) for k, v in new_methods.items())
return new_methods
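# Illustrative note (added for exposition): with ``special=True`` the keys
# built above become dunder names, while the flex variants keep the plain
# names.
#
# names('add') -> '__add__'
# names('rand_') -> '__rand__' (the trailing underscore supplies one of the two)
# names('add') with special=False -> 'add'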
def add_methods(cls, new_methods, force, select, exclude):
if select and exclude:
raise TypeError("May only pass either select or exclude")
methods = new_methods
if select:
select = set(select)
methods = {}
for key, method in new_methods.items():
if key in select:
methods[key] = method
if exclude:
for k in exclude:
new_methods.pop(k, None)
for name, method in new_methods.items():
if force or name not in cls.__dict__:
bind_method(cls, name, method)
#----------------------------------------------------------------------
# Arithmetic
def add_special_arithmetic_methods(cls, arith_method=None, radd_func=None,
comp_method=None, bool_method=None,
use_numexpr=True, force=False, select=None,
exclude=None):
"""
Adds the full suite of special arithmetic methods (``__add__``,
``__sub__``, etc.) to the class.
Parameters
----------
arith_method : function (optional)
factory for special arithmetic methods, with op string:
f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
radd_func : function (optional)
Possible replacement for ``operator.add`` for compatibility
comp_method : function, optional,
factory for rich comparison - signature: f(op, name, str_rep)
use_numexpr : bool, default True
whether to accelerate with numexpr, defaults to True
force : bool, default False
if False, checks whether function is defined **on ``cls.__dict__``**
before defining if True, always defines functions on class base
select : iterable of strings (optional)
if passed, only sets functions with names in select
exclude : iterable of strings (optional)
if passed, will not set functions with names in exclude
"""
radd_func = radd_func or operator.add
# in frame, special methods have default_axis = None, comp methods use
# 'columns'
new_methods = _create_methods(arith_method, radd_func, comp_method,
bool_method, use_numexpr, default_axis=None,
special=True)
# inplace operators (I feel like these should get passed an `inplace=True`
# or just be removed
def _wrap_inplace_method(method):
"""
return an inplace wrapper for this method
"""
def f(self, other):
result = method(self, other)
# this makes sure that we are aligned like the input
# we are updating inplace so we want to ignore is_copy
self._update_inplace(result.reindex_like(self,copy=False)._data,
verify_is_copy=False)
return self
return f
new_methods.update(dict(
__iadd__=_wrap_inplace_method(new_methods["__add__"]),
__isub__=_wrap_inplace_method(new_methods["__sub__"]),
__imul__=_wrap_inplace_method(new_methods["__mul__"]),
__itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
__ipow__=_wrap_inplace_method(new_methods["__pow__"]),
))
if not compat.PY3:
new_methods["__idiv__"] = new_methods["__div__"]
add_methods(cls, new_methods=new_methods, force=force, select=select,
exclude=exclude)
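# Illustrative sketch (added for exposition; ``MyThing`` and the factory
# names below are placeholders, not pandas API): a container class is wired
# up at module import time roughly like this, with per-class factories
# supplying the actual arithmetic/comparison implementations.
#
# add_special_arithmetic_methods(MyThing,
# arith_method=my_arith_factory,
# comp_method=my_comp_factory,
# bool_method=my_bool_factory,
# use_numexpr=True)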
def add_flex_arithmetic_methods(cls, flex_arith_method, radd_func=None,
flex_comp_method=None, flex_bool_method=None,
use_numexpr=True, force=False, select=None,
exclude=None):
"""
Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
to the class.
Parameters
----------
flex_arith_method : function
factory for special arithmetic methods, with op string:
f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
radd_func : function (optional)
Possible replacement for ``lambda x, y: operator.add(y, x)`` for
compatibility
flex_comp_method : function, optional,
factory for rich comparison - signature: f(op, name, str_rep)
use_numexpr : bool, default True
whether to accelerate with numexpr, defaults to True
force : bool, default False
if False, checks whether function is defined **on ``cls.__dict__``**
        before defining; if True, always defines functions on the class base
select : iterable of strings (optional)
if passed, only sets functions with names in select
exclude : iterable of strings (optional)
if passed, will not set functions with names in exclude
"""
radd_func = radd_func or (lambda x, y: operator.add(y, x))
# in frame, default axis is 'columns', doesn't matter for series and panel
new_methods = _create_methods(
flex_arith_method, radd_func, flex_comp_method, flex_bool_method,
use_numexpr, default_axis='columns', special=False)
new_methods.update(dict(
multiply=new_methods['mul'],
subtract=new_methods['sub'],
divide=new_methods['div']
))
# opt out of bool flex methods for now
for k in ('ror_', 'rxor', 'rand_'):
if k in new_methods:
new_methods.pop(k)
add_methods(cls, new_methods=new_methods, force=force, select=select,
exclude=exclude)
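# --- Editor's note: illustrative sketch, not part of the original module and
# never called.  It shows the named ("flex") methods and the aliases added
# above (``multiply``/``subtract``/``divide``) through public pandas API. ---
def _example_flex_methods_sketch():
    import pandas as pd
    s = pd.Series([1, 2, 3])
    total = s.add(pd.Series([10, 20, 30]))   # named equivalent of s + other
    doubled = s.multiply(2)                  # alias for s.mul(2)
    return total, doubled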
class _TimeOp(object):
"""
Wrapper around Series datetime/time/timedelta arithmetic operations.
Generally, you should use classmethod ``maybe_convert_for_time_op`` as an
entry point.
"""
fill_value = iNaT
wrap_results = staticmethod(lambda x: x)
dtype = None
def __init__(self, left, right, name, na_op):
# need to make sure that we are aligning the data
if isinstance(left, pd.Series) and isinstance(right, pd.Series):
left, right = left.align(right,copy=False)
lvalues = self._convert_to_array(left, name=name)
rvalues = self._convert_to_array(right, name=name, other=lvalues)
self.name = name
self.na_op = na_op
# left
self.left = left
self.is_offset_lhs = self._is_offset(left)
self.is_timedelta_lhs = is_timedelta64_dtype(lvalues)
self.is_datetime64_lhs = is_datetime64_dtype(lvalues)
self.is_datetime64tz_lhs = is_datetime64tz_dtype(lvalues)
self.is_datetime_lhs = self.is_datetime64_lhs or self.is_datetime64tz_lhs
self.is_integer_lhs = left.dtype.kind in ['i', 'u']
# right
self.right = right
self.is_offset_rhs = self._is_offset(right)
self.is_datetime64_rhs = is_datetime64_dtype(rvalues)
self.is_datetime64tz_rhs = is_datetime64tz_dtype(rvalues)
self.is_datetime_rhs = self.is_datetime64_rhs or self.is_datetime64tz_rhs
self.is_timedelta_rhs = is_timedelta64_dtype(rvalues)
self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u')
self._validate(lvalues, rvalues, name)
self.lvalues, self.rvalues = self._convert_for_datetime(lvalues, rvalues)
def _validate(self, lvalues, rvalues, name):
# timedelta and integer mul/div
if (self.is_timedelta_lhs and self.is_integer_rhs) or (
self.is_integer_lhs and self.is_timedelta_rhs):
if name not in ('__div__', '__truediv__', '__mul__'):
raise TypeError("can only operate on a timedelta and an "
"integer for division, but the operator [%s]"
"was passed" % name)
# 2 datetimes
elif self.is_datetime_lhs and self.is_datetime_rhs:
if name not in ('__sub__','__rsub__'):
raise TypeError("can only operate on a datetimes for"
" subtraction, but the operator [%s] was"
" passed" % name)
            # tz's must be equal (same or None)
            if getattr(lvalues, 'tz', None) != getattr(rvalues, 'tz', None):
                raise ValueError("Incompatible tz's on datetime subtraction ops")
# 2 timedeltas
elif ((self.is_timedelta_lhs and
(self.is_timedelta_rhs or self.is_offset_rhs)) or
(self.is_timedelta_rhs and
(self.is_timedelta_lhs or self.is_offset_lhs))):
if name not in ('__div__', '__rdiv__', '__truediv__', '__rtruediv__',
'__add__', '__radd__', '__sub__', '__rsub__'):
raise TypeError("can only operate on a timedeltas for "
"addition, subtraction, and division, but the"
" operator [%s] was passed" % name)
# datetime and timedelta/DateOffset
elif (self.is_datetime_lhs and
(self.is_timedelta_rhs or self.is_offset_rhs)):
if name not in ('__add__', '__radd__', '__sub__'):
raise TypeError("can only operate on a datetime with a rhs of"
" a timedelta/DateOffset for addition and subtraction,"
" but the operator [%s] was passed" %
name)
elif ((self.is_timedelta_lhs or self.is_offset_lhs)
and self.is_datetime_rhs):
if name not in ('__add__', '__radd__'):
raise TypeError("can only operate on a timedelta/DateOffset and"
" a datetime for addition, but the operator"
" [%s] was passed" % name)
else:
            raise TypeError('cannot operate on a series without a rhs '
'of a series/ndarray of type datetime64[ns] '
'or a timedelta')
def _convert_to_array(self, values, name=None, other=None):
"""converts values to ndarray"""
from pandas.tseries.timedeltas import to_timedelta
ovalues = values
if not is_list_like(values):
values = np.array([values])
inferred_type = lib.infer_dtype(values)
if inferred_type in ('datetime64', 'datetime', 'date', 'time'):
            # if `other` is a timedelta but these values are all NaT, we were
            # inferred down the wrong (datetime) path; coerce to timedelta
if (other is not None and other.dtype == 'timedelta64[ns]' and
all(isnull(v) for v in values)):
values = np.empty(values.shape, dtype=other.dtype)
values[:] = iNaT
# a datelike
elif isinstance(values, pd.DatetimeIndex):
values = values.to_series()
# datetime with tz
elif isinstance(ovalues, datetime.datetime) and hasattr(ovalues,'tz'):
values = pd.DatetimeIndex(values)
# datetime array with tz
elif com.is_datetimetz(values):
if isinstance(values, pd.Series):
values = values._values
elif not (isinstance(values, (np.ndarray, pd.Series)) and
is_datetime64_dtype(values)):
values = tslib.array_to_datetime(values)
elif inferred_type in ('timedelta', 'timedelta64'):
            # have a timedelta, convert to ns here
values = to_timedelta(values, errors='coerce')
elif inferred_type == 'integer':
# py3 compat where dtype is 'm' but is an integer
if values.dtype.kind == 'm':
values = values.astype('timedelta64[ns]')
elif isinstance(values, pd.PeriodIndex):
values = values.to_timestamp().to_series()
elif name not in ('__truediv__', '__div__', '__mul__'):
raise TypeError("incompatible type for a datetime/timedelta "
"operation [{0}]".format(name))
elif inferred_type == 'floating':
# all nan, so ok, use the other dtype (e.g. timedelta or datetime)
if isnull(values).all():
values = np.empty(values.shape, dtype=other.dtype)
values[:] = iNaT
else:
raise TypeError(
'incompatible type [{0}] for a datetime/timedelta '
'operation'.format(np.array(values).dtype))
elif self._is_offset(values):
return values
else:
raise TypeError("incompatible type [{0}] for a datetime/timedelta"
" operation".format(np.array(values).dtype))
return values
def _convert_for_datetime(self, lvalues, rvalues):
from pandas.tseries.timedeltas import to_timedelta
mask = isnull(lvalues) | isnull(rvalues)
# datetimes require views
if self.is_datetime_lhs or self.is_datetime_rhs:
# datetime subtraction means timedelta
if self.is_datetime_lhs and self.is_datetime_rhs:
self.dtype = 'timedelta64[ns]'
elif self.is_datetime64tz_lhs:
self.dtype = lvalues.dtype
elif self.is_datetime64tz_rhs:
self.dtype = rvalues.dtype
else:
self.dtype = 'datetime64[ns]'
# if adding single offset try vectorized path
# in DatetimeIndex; otherwise elementwise apply
def _offset(lvalues, rvalues):
if len(lvalues) == 1:
rvalues = pd.DatetimeIndex(rvalues)
lvalues = lvalues[0]
else:
warnings.warn("Adding/subtracting array of DateOffsets to Series not vectorized",
PerformanceWarning)
rvalues = rvalues.astype('O')
# pass thru on the na_op
self.na_op = lambda x, y: getattr(x,self.name)(y)
return lvalues, rvalues
if self.is_offset_lhs:
lvalues, rvalues = _offset(lvalues, rvalues)
elif self.is_offset_rhs:
rvalues, lvalues = _offset(rvalues, lvalues)
else:
# with tz, convert to UTC
if self.is_datetime64tz_lhs:
lvalues = lvalues.tz_localize(None)
if self.is_datetime64tz_rhs:
rvalues = rvalues.tz_localize(None)
lvalues = lvalues.view(np.int64)
rvalues = rvalues.view(np.int64)
# otherwise it's a timedelta
else:
self.dtype = 'timedelta64[ns]'
# convert Tick DateOffset to underlying delta
if self.is_offset_lhs:
lvalues = to_timedelta(lvalues)
if self.is_offset_rhs:
rvalues = to_timedelta(rvalues)
lvalues = lvalues.astype(np.int64)
rvalues = rvalues.astype(np.int64)
            # timedelta division -> unitless
# integer gets converted to timedelta in np < 1.6
if (self.is_timedelta_lhs and self.is_timedelta_rhs) and\
not self.is_integer_rhs and\
not self.is_integer_lhs and\
self.name in ('__div__', '__truediv__'):
self.dtype = 'float64'
self.fill_value = np.nan
lvalues = lvalues.astype(np.float64)
rvalues = rvalues.astype(np.float64)
# if we need to mask the results
if mask.any():
def f(x):
# datetime64[ns]/timedelta64[ns] masking
try:
x = np.array(x, dtype=self.dtype)
except TypeError:
x = np.array(x, dtype='datetime64[ns]')
np.putmask(x, mask, self.fill_value)
return x
self.wrap_results = f
return lvalues, rvalues
def _is_offset(self, arr_or_obj):
""" check if obj or all elements of list-like is DateOffset """
if isinstance(arr_or_obj, pd.DateOffset):
return True
elif is_list_like(arr_or_obj):
return all(isinstance(x, pd.DateOffset) for x in arr_or_obj)
else:
return False
@classmethod
def maybe_convert_for_time_op(cls, left, right, name, na_op):
"""
if ``left`` and ``right`` are appropriate for datetime arithmetic with
operation ``name``, processes them and returns a ``_TimeOp`` object
that stores all the required values. Otherwise, it will generate
either a ``NotImplementedError`` or ``None``, indicating that the
operation is unsupported for datetimes (e.g., an unsupported r_op) or
that the data is not the right type for time ops.
"""
# decide if we can do it
is_timedelta_lhs = is_timedelta64_dtype(left)
is_datetime_lhs = is_datetime64_dtype(left) or is_datetime64tz_dtype(left)
if not (is_datetime_lhs or is_timedelta_lhs):
return None
return cls(left, right, name, na_op)
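# --- Editor's note: illustrative sketch, not part of the original module and
# never called.  It exercises only the "not datetimelike -> None" branch of
# ``maybe_convert_for_time_op``; the datetime branch is described in the
# comment below. ---
def _example_maybe_convert_for_time_op_sketch():
    import operator
    import pandas as pd
    num = pd.Series([1, 2, 3])
    # A non-datetimelike left operand returns None, so the caller (see
    # _arith_method_SERIES below) falls back to the plain numeric path.
    assert _TimeOp.maybe_convert_for_time_op(num, num, '__add__',
                                             operator.add) is None
    # A datetime64/timedelta64 left operand would instead return a _TimeOp
    # whose .lvalues/.rvalues hold the converted operands.
    return None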
def _arith_method_SERIES(op, name, str_rep, fill_zeros=None,
default_axis=None, **eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True, **eval_kwargs)
except TypeError:
if isinstance(y, (np.ndarray, pd.Series, pd.Index)):
dtype = np.find_common_type([x.dtype, y.dtype], [])
result = np.empty(x.size, dtype=dtype)
mask = notnull(x) & notnull(y)
result[mask] = op(x[mask], _values_from_object(y[mask]))
elif isinstance(x, np.ndarray):
result = np.empty(len(x), dtype=x.dtype)
mask = notnull(x)
result[mask] = op(x[mask], y)
else:
raise TypeError("{typ} cannot perform the operation {op}".format(typ=type(x).__name__,op=str_rep))
result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)
result = com._fill_zeros(result, x, y, name, fill_zeros)
return result
def wrapper(left, right, name=name, na_op=na_op):
if isinstance(right, pd.DataFrame):
return NotImplemented
time_converted = _TimeOp.maybe_convert_for_time_op(left, right, name, na_op)
if time_converted is None:
lvalues, rvalues = left, right
dtype = None
wrap_results = lambda x: x
elif time_converted == NotImplemented:
return NotImplemented
else:
left, right = time_converted.left, time_converted.right
lvalues, rvalues = time_converted.lvalues, time_converted.rvalues
dtype = time_converted.dtype
wrap_results = time_converted.wrap_results
na_op = time_converted.na_op
if isinstance(rvalues, pd.Series):
rindex = getattr(rvalues,'index',rvalues)
name = _maybe_match_name(left, rvalues)
lvalues = getattr(lvalues, 'values', lvalues)
rvalues = getattr(rvalues, 'values', rvalues)
if left.index.equals(rindex):
index = left.index
else:
index, lidx, ridx = left.index.join(rindex, how='outer',
return_indexers=True)
if lidx is not None:
lvalues = com.take_1d(lvalues, lidx)
if ridx is not None:
rvalues = com.take_1d(rvalues, ridx)
arr = na_op(lvalues, rvalues)
return left._constructor(wrap_results(arr), index=index,
name=name, dtype=dtype)
else:
# scalars
if hasattr(lvalues, 'values') and not isinstance(lvalues, pd.DatetimeIndex):
lvalues = lvalues.values
return left._constructor(wrap_results(na_op(lvalues, rvalues)),
index=left.index, name=left.name,
dtype=dtype)
return wrapper
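# --- Editor's note: illustrative sketch, not part of the original module and
# never called.  It demonstrates the outer-join alignment and NaN propagation
# performed by wrapper() above, using only public pandas API. ---
def _example_series_arith_alignment_sketch():
    import pandas as pd
    a = pd.Series([1, 2], index=['a', 'b'])
    b = pd.Series([10], index=['b'])
    # Indexes are joined with how='outer'; labels present on only one side
    # come back as NaN after na_op runs on the aligned values.
    return a + b              # a -> NaN, b -> 12.0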
def _comp_method_SERIES(op, name, str_rep, masker=False):
"""
    Wrapper function for Series comparison operations, to avoid
code duplication.
"""
def na_op(x, y):
# dispatch to the categorical if we have a categorical
# in either operand
if is_categorical_dtype(x):
return op(x,y)
elif is_categorical_dtype(y) and not isscalar(y):
return op(y,x)
if is_object_dtype(x.dtype):
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, pd.Series)):
if not is_object_dtype(y.dtype):
result = lib.vec_compare(x, y.astype(np.object_), op)
else:
result = lib.vec_compare(x, y, op)
else:
result = lib.scalar_compare(x, y, op)
else:
# we want to compare like types
# we only want to convert to integer like if
# we are not NotImplemented, otherwise
# we would allow datetime64 (but viewed as i8) against
# integer comparisons
if is_datetimelike_v_numeric(x, y):
raise TypeError("invalid type comparison")
# numpy does not like comparisons vs None
if isscalar(y) and isnull(y):
if name == '__ne__':
return np.ones(len(x), dtype=bool)
else:
return np.zeros(len(x), dtype=bool)
# we have a datetime/timedelta and may need to convert
mask = None
if needs_i8_conversion(x) or (not isscalar(y) and needs_i8_conversion(y)):
if isscalar(y):
y = _index.convert_scalar(x,_values_from_object(y))
else:
y = y.view('i8')
mask = isnull(x)
x = x.view('i8')
try:
result = getattr(x, name)(y)
if result is NotImplemented:
raise TypeError("invalid type comparison")
except AttributeError:
result = op(x, y)
if mask is not None and mask.any():
result[mask] = masker
return result
def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis)
if isinstance(other, pd.Series):
name = _maybe_match_name(self, other)
if len(self) != len(other):
raise ValueError('Series lengths must match to compare')
return self._constructor(na_op(self.values, other.values),
index=self.index, name=name)
elif isinstance(other, pd.DataFrame): # pragma: no cover
return NotImplemented
elif isinstance(other, (np.ndarray, pd.Index)):
if len(self) != len(other):
raise ValueError('Lengths must match to compare')
return self._constructor(na_op(self.values, np.asarray(other)),
index=self.index).__finalize__(self)
elif isinstance(other, pd.Categorical):
if not is_categorical_dtype(self):
msg = "Cannot compare a Categorical for op {op} with Series of dtype {typ}.\n"\
"If you want to compare values, use 'series <op> np.asarray(other)'."
raise TypeError(msg.format(op=op,typ=self.dtype))
if is_categorical_dtype(self):
# cats are a special case as get_values() would return an ndarray, which would then
# not take categories ordering into account
# we can go directly to op, as the na_op would just test again and dispatch to it.
res = op(self.values, other)
else:
values = self.get_values()
if isinstance(other, (list, np.ndarray)):
other = np.asarray(other)
res = na_op(values, other)
if isscalar(res):
raise TypeError('Could not compare %s type with Series'
% type(other))
# always return a full value series here
res = _values_from_object(res)
res = pd.Series(res, index=self.index, name=self.name,
dtype='bool')
return res
return wrapper
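# --- Editor's note: illustrative sketch, not part of the original module and
# never called.  It shows the null-scalar short-circuit in na_op above:
# comparisons against a missing scalar are all False, except __ne__. ---
def _example_series_comparison_sketch():
    import numpy as np
    import pandas as pd
    s = pd.Series([1.0, np.nan, 3.0])
    eq = s == np.nan          # -> [False, False, False]
    ne = s != np.nan          # -> [True, True, True]
    return eq, ne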
def _bool_method_SERIES(op, name, str_rep):
"""
    Wrapper function for Series boolean/bitwise operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, pd.Series)):
if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)):
result = op(x, y) # when would this be hit?
else:
x = com._ensure_object(x)
y = com._ensure_object(y)
result = lib.vec_binop(x, y, op)
else:
try:
# let null fall thru
if not isnull(y):
y = bool(y)
result = lib.scalar_binop(x, y, op)
except:
raise TypeError("cannot compare a dtyped [{0}] array with "
"a scalar of type [{1}]".format(
x.dtype, type(y).__name__))
return result
def wrapper(self, other):
is_self_int_dtype = is_integer_dtype(self.dtype)
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
if isinstance(other, pd.Series):
name = _maybe_match_name(self, other)
other = other.reindex_like(self)
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
return filler(self._constructor(na_op(self.values, other.values),
index=self.index,
name=name))
elif isinstance(other, pd.DataFrame):
return NotImplemented
else:
# scalars, list, tuple, np.array
filler = fill_int if is_self_int_dtype and is_integer_dtype(np.asarray(other)) else fill_bool
return filler(self._constructor(na_op(self.values, other),
index=self.index)).__finalize__(self)
return wrapper
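# --- Editor's note: illustrative sketch, not part of the original module and
# never called.  It shows the fill_bool behaviour of the wrapper above:
# missing values in a logical op come back as False in the result. ---
def _example_series_bool_fill_sketch():
    import numpy as np
    import pandas as pd
    a = pd.Series([True, np.nan, False])    # object dtype because of the NaN
    b = pd.Series([True, True, True])
    return a & b              # -> [True, False, False]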
def _radd_compat(left, right):
radd = lambda x, y: y + x
# GH #353, NumPy 1.5.1 workaround
try:
output = radd(left, right)
except TypeError:
raise
return output
_op_descriptions = {'add': {'op': '+', 'desc': 'Addition', 'reversed': False, 'reverse': 'radd'},
'sub': {'op': '-', 'desc': 'Subtraction', 'reversed': False, 'reverse': 'rsub'},
'mul': {'op': '*', 'desc': 'Multiplication', 'reversed': False, 'reverse': 'rmul'},
'mod': {'op': '%', 'desc': 'Modulo', 'reversed': False, 'reverse': 'rmod'},
'pow': {'op': '**', 'desc': 'Exponential power', 'reversed': False, 'reverse': 'rpow'},
'truediv': {'op': '/', 'desc': 'Floating division', 'reversed': False, 'reverse': 'rtruediv'},
'floordiv': {'op': '//', 'desc': 'Integer division', 'reversed': False, 'reverse': 'rfloordiv'}}
_op_names = list(_op_descriptions.keys())
for k in _op_names:
reverse_op = _op_descriptions[k]['reverse']
_op_descriptions[reverse_op] = _op_descriptions[k].copy()
_op_descriptions[reverse_op]['reversed'] = True
_op_descriptions[reverse_op]['reverse'] = k
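# --- Editor's note: illustrative sketch, not part of the original module and
# never called.  It checks the reverse-op entries generated by the loop just
# above. ---
def _example_op_descriptions_sketch():
    assert _op_descriptions['radd']['reversed'] is True
    assert _op_descriptions['radd']['reverse'] == 'add'
    assert _op_descriptions['add']['reverse'] == 'radd'
    return sorted(_op_descriptions)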
def _flex_method_SERIES(op, name, str_rep, default_axis=None,
fill_zeros=None, **eval_kwargs):
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' series'
else:
equiv = 'series ' + op_desc['op'] + ' other'
doc = """
%s of series and other, element-wise (binary operator `%s`).
Equivalent to ``%s``, but with support to substitute a fill_value for
missing data in one of the inputs.
Parameters
----------
    other : Series or scalar value
fill_value : None or float value, default None (NaN)
Fill missing (NaN) values with this value. If both Series are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
result : Series
See also
--------
Series.%s
""" % (op_desc['desc'], op_name, equiv, op_desc['reverse'])
@Appender(doc)
def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
# validate axis
self._get_axis_number(axis)
if isinstance(other, pd.Series):
return self._binop(other, op, level=level, fill_value=fill_value)
elif isinstance(other, (np.ndarray, pd.Series, list, tuple)):
if len(other) != len(self):
raise ValueError('Lengths must be equal')
return self._binop(self._constructor(other, self.index), op,
level=level, fill_value=fill_value)
else:
return self._constructor(op(self.values, other),
self.index).__finalize__(self)
flex_wrapper.__name__ = name
return flex_wrapper
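# --- Editor's note: illustrative sketch, not part of the original module and
# never called.  It shows the ``fill_value`` behaviour documented in the flex
# docstring above, which the plain operators do not offer. ---
def _example_series_flex_fill_value_sketch():
    import pandas as pd
    a = pd.Series([1, 2], index=['a', 'b'])
    b = pd.Series([10, 20], index=['b', 'c'])
    # Labels missing on one side are filled with 0 before the op; labels
    # missing on both sides would still be NaN.
    return a.add(b, fill_value=0)   # a -> 1.0, b -> 12.0, c -> 20.0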
series_flex_funcs = dict(flex_arith_method=_flex_method_SERIES,
radd_func=_radd_compat,
flex_comp_method=_comp_method_SERIES)
series_special_funcs = dict(arith_method=_arith_method_SERIES,
radd_func=_radd_compat,
comp_method=_comp_method_SERIES,
bool_method=_bool_method_SERIES)
_arith_doc_FRAME = """
Binary operator %s with support to substitute a fill_value for missing data in
one of the inputs
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both DataFrame locations are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Notes
-----
Mismatched indices will be unioned together
Returns
-------
result : DataFrame
"""
def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns',
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(
op, str_rep, x, y, raise_on_error=True, **eval_kwargs)
except TypeError:
xrav = x.ravel()
if isinstance(y, (np.ndarray, pd.Series)):
dtype = np.find_common_type([x.dtype, y.dtype], [])
result = np.empty(x.size, dtype=dtype)
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
xrav = xrav[mask]
yrav = yrav[mask]
if np.prod(xrav.shape) and np.prod(yrav.shape):
result[mask] = op(xrav, yrav)
elif hasattr(x,'size'):
result = np.empty(x.size, dtype=x.dtype)
mask = notnull(xrav)
xrav = xrav[mask]
if np.prod(xrav.shape):
result[mask] = op(xrav, y)
else:
raise TypeError("cannot perform operation {op} between objects "
"of type {x} and {y}".format(op=name,x=type(x),y=type(y)))
result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)
result = result.reshape(x.shape)
result = com._fill_zeros(result, x, y, name, fill_zeros)
return result
if name in _op_descriptions:
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' dataframe'
else:
equiv = 'dataframe ' + op_desc['op'] + ' other'
doc = """
%s of dataframe and other, element-wise (binary operator `%s`).
Equivalent to ``%s``, but with support to substitute a fill_value for
missing data in one of the inputs.
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both DataFrame locations are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Notes
-----
Mismatched indices will be unioned together
Returns
-------
result : DataFrame
See also
--------
DataFrame.%s
""" % (op_desc['desc'], op_name, equiv, op_desc['reverse'])
else:
doc = _arith_doc_FRAME % name
@Appender(doc)
def f(self, other, axis=default_axis, level=None, fill_value=None):
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._combine_frame(other, na_op, fill_value, level)
elif isinstance(other, pd.Series):
return self._combine_series(other, na_op, fill_value, axis, level)
elif isinstance(other, (list, tuple)):
if axis is not None and self._get_axis_name(axis) == 'index':
# TODO: Get all of these to use _constructor_sliced
# casted = self._constructor_sliced(other, index=self.index)
casted = pd.Series(other, index=self.index)
else:
# casted = self._constructor_sliced(other, index=self.columns)
casted = pd.Series(other, index=self.columns)
return self._combine_series(casted, na_op, fill_value, axis, level)
elif isinstance(other, np.ndarray) and other.ndim: # skips np scalar
if other.ndim == 1:
if axis is not None and self._get_axis_name(axis) == 'index':
# casted = self._constructor_sliced(other,
# index=self.index)
casted = pd.Series(other, index=self.index)
else:
# casted = self._constructor_sliced(other,
# index=self.columns)
casted = pd.Series(other, index=self.columns)
return self._combine_series(casted, na_op, fill_value,
axis, level)
elif other.ndim == 2:
# casted = self._constructor(other, index=self.index,
# columns=self.columns)
casted = pd.DataFrame(other, index=self.index,
columns=self.columns)
return self._combine_frame(casted, na_op, fill_value, level)
else:
raise ValueError("Incompatible argument shape: %s" %
(other.shape, ))
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
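# --- Editor's note: illustrative sketch, not part of the original module and
# never called.  It demonstrates the ``axis`` handling in f() above for a
# Series operand, using only public pandas API. ---
def _example_frame_arith_axis_sketch():
    import pandas as pd
    df = pd.DataFrame({'x': [1, 2], 'y': [3, 4]})
    row = pd.Series({'x': 10, 'y': 100})
    col = pd.Series([10, 100])
    by_cols = df.add(row)                  # default axis='columns'
    by_rows = df.add(col, axis='index')    # match on the row index instead
    return by_cols, by_rows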
# Masker unused for now
def _flex_comp_method_FRAME(op, name, str_rep=None, default_axis='columns',
masker=False):
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
xrav = x.ravel()
result = np.empty(x.size, dtype=x.dtype)
if isinstance(y, (np.ndarray, pd.Series)):
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notnull(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result
    @Appender('Wrapper for flexible comparison method %s' % name)
def f(self, other, axis=default_axis, level=None):
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._flex_compare_frame(other, na_op, str_rep, level)
elif isinstance(other, pd.Series):
return self._combine_series(other, na_op, None, axis, level)
elif isinstance(other, (list, tuple)):
if axis is not None and self._get_axis_name(axis) == 'index':
casted = pd.Series(other, index=self.index)
else:
casted = pd.Series(other, index=self.columns)
return self._combine_series(casted, na_op, None, axis, level)
elif isinstance(other, np.ndarray):
if other.ndim == 1:
if axis is not None and self._get_axis_name(axis) == 'index':
casted = pd.Series(other, index=self.index)
else:
casted = pd.Series(other, index=self.columns)
return self._combine_series(casted, na_op, None, axis, level)
elif other.ndim == 2:
casted = pd.DataFrame(other, index=self.index,
columns=self.columns)
return self._flex_compare_frame(casted, na_op, str_rep, level)
else:
raise ValueError("Incompatible argument shape: %s" %
(other.shape, ))
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
def _comp_method_FRAME(func, name, str_rep, masker=False):
@Appender('Wrapper for comparison method %s' % name)
def f(self, other):
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._compare_frame(other, func, str_rep)
elif isinstance(other, pd.Series):
return self._combine_series_infer(other, func)
else:
            # for straight boolean comparisons we want to allow all columns
            # (regardless of dtype) to pass through. See #4537 for discussion.
res = self._combine_const(other, func, raise_on_error=False)
return res.fillna(True).astype(bool)
f.__name__ = name
return f
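# --- Editor's note: illustrative sketch, not part of the original module and
# never called.  It shows the scalar branch of f() above: the comparison is
# broadcast element-wise via _combine_const, and NaN compares as False for
# ``==``. ---
def _example_frame_comparison_sketch():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({'x': [1, 2], 'y': [3.0, np.nan]})
    return df == 3.0          # only ('y', row 0) is True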
frame_flex_funcs = dict(flex_arith_method=_arith_method_FRAME,
radd_func=_radd_compat,
flex_comp_method=_flex_comp_method_FRAME)
frame_special_funcs = dict(arith_method=_arith_method_FRAME,
radd_func=_radd_compat,
comp_method=_comp_method_FRAME,
bool_method=_arith_method_FRAME)
def _arith_method_PANEL(op, name, str_rep=None, fill_zeros=None,
default_axis=None, **eval_kwargs):
# copied from Series na_op above, but without unnecessary branch for
# non-scalar
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True, **eval_kwargs)
except TypeError:
# TODO: might need to find_common_type here?
result = np.empty(len(x), dtype=x.dtype)
mask = notnull(x)
result[mask] = op(x[mask], y)
result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)
result = com._fill_zeros(result, x, y, name, fill_zeros)
return result
    # works only for scalars
def f(self, other):
if not isscalar(other):
raise ValueError('Simple arithmetic with %s can only be '
'done with scalar values' %
self._constructor.__name__)
return self._combine(other, op)
f.__name__ = name
return f
def _comp_method_PANEL(op, name, str_rep=None, masker=False):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True)
except TypeError:
xrav = x.ravel()
result = np.empty(x.size, dtype=bool)
if isinstance(y, np.ndarray):
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notnull(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result
@Appender('Wrapper for comparison method %s' % name)
def f(self, other):
if isinstance(other, self._constructor):
return self._compare_constructor(other, na_op)
elif isinstance(other, (self._constructor_sliced, pd.DataFrame,
pd.Series)):
raise Exception("input needs alignment for this object [%s]" %
self._constructor)
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
panel_special_funcs = dict(arith_method=_arith_method_PANEL,
comp_method=_comp_method_PANEL,
bool_method=_arith_method_PANEL)
| mit |
xya/sms-tools | lectures/04-STFT/plots-code/spectrogram.py | 19 | 1174 | import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import utilFunctions as UF
import matplotlib.pyplot as plt
from scipy.signal import hamming
from scipy.fftpack import fft
import math
(fs, x) = UF.wavread('../../../sounds/piano.wav')
w = np.hamming(1001)
N = 1024
H = 256
mX, pX = STFT.stftAnal(x, fs, w, N, H)
plt.figure(1, figsize=(9.5, 6))
plt.subplot(211)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(N/2+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX))
plt.title('mX (piano.wav), M=1001, N=1024, H=256')
plt.autoscale(tight=True)
plt.subplot(212)
numFrames = int(pX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(N/2+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.diff(np.transpose(pX),axis=0))
plt.title('pX derivative (piano.wav), M=1001, N=1024, H=256')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('spectrogram.png')
plt.show()
| agpl-3.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/indexes/datetimes/test_tools.py | 1 | 66369 | """ test to_datetime """
import sys
import pytz
import pytest
import locale
import calendar
import dateutil
import numpy as np
from dateutil.parser import parse
from datetime import datetime, date, time
from distutils.version import LooseVersion
import pandas as pd
from pandas._libs import tslib
from pandas._libs.tslibs import parsing
from pandas.core.tools import datetimes as tools
from pandas.core.tools.datetimes import normalize_date
from pandas.compat import lmap
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import is_datetime64_ns_dtype
from pandas.util import testing as tm
from pandas.util.testing import assert_series_equal, _skip_if_has_locale
from pandas import (isna, to_datetime, Timestamp, Series, DataFrame,
Index, DatetimeIndex, NaT, date_range, bdate_range,
compat)
class TestTimeConversionFormats(object):
def test_to_datetime_format(self):
values = ['1/1/2000', '1/2/2000', '1/3/2000']
results1 = [Timestamp('20000101'), Timestamp('20000201'),
Timestamp('20000301')]
results2 = [Timestamp('20000101'), Timestamp('20000102'),
Timestamp('20000103')]
for vals, expecteds in [(values, (Index(results1), Index(results2))),
(Series(values),
(Series(results1), Series(results2))),
(values[0], (results1[0], results2[0])),
(values[1], (results1[1], results2[1])),
(values[2], (results1[2], results2[2]))]:
for i, fmt in enumerate(['%d/%m/%Y', '%m/%d/%Y']):
result = to_datetime(vals, format=fmt)
expected = expecteds[i]
if isinstance(expected, Series):
assert_series_equal(result, Series(expected))
elif isinstance(expected, Timestamp):
assert result == expected
else:
tm.assert_index_equal(result, expected)
def test_to_datetime_format_YYYYMMDD(self):
s = Series([19801222, 19801222] + [19810105] * 5)
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format='%Y%m%d')
assert_series_equal(result, expected)
result = to_datetime(s.apply(str), format='%Y%m%d')
assert_series_equal(result, expected)
# with NaT
expected = Series([Timestamp("19801222"), Timestamp("19801222")] +
[Timestamp("19810105")] * 5)
expected[2] = np.nan
s[2] = np.nan
result = to_datetime(s, format='%Y%m%d')
assert_series_equal(result, expected)
# string with NaT
s = s.apply(str)
s[2] = 'nat'
result = to_datetime(s, format='%Y%m%d')
assert_series_equal(result, expected)
# coercion
# GH 7930
s = Series([20121231, 20141231, 99991231])
result = pd.to_datetime(s, format='%Y%m%d', errors='ignore')
expected = Series([datetime(2012, 12, 31),
datetime(2014, 12, 31), datetime(9999, 12, 31)],
dtype=object)
tm.assert_series_equal(result, expected)
result = pd.to_datetime(s, format='%Y%m%d', errors='coerce')
expected = Series(['20121231', '20141231', 'NaT'], dtype='M8[ns]')
assert_series_equal(result, expected)
# GH 10178
def test_to_datetime_format_integer(self):
s = Series([2000, 2001, 2002])
expected = Series([Timestamp(x) for x in s.apply(str)])
result = to_datetime(s, format='%Y')
assert_series_equal(result, expected)
s = Series([200001, 200105, 200206])
expected = Series([Timestamp(x[:4] + '-' + x[4:]) for x in s.apply(str)
])
result = to_datetime(s, format='%Y%m')
assert_series_equal(result, expected)
def test_to_datetime_format_microsecond(self):
# these are locale dependent
lang, _ = locale.getlocale()
month_abbr = calendar.month_abbr[4]
val = '01-{}-2011 00:00:01.978'.format(month_abbr)
format = '%d-%b-%Y %H:%M:%S.%f'
result = to_datetime(val, format=format)
exp = datetime.strptime(val, format)
assert result == exp
def test_to_datetime_format_time(self):
data = [
['01/10/2010 15:20', '%m/%d/%Y %H:%M',
Timestamp('2010-01-10 15:20')],
['01/10/2010 05:43', '%m/%d/%Y %I:%M',
Timestamp('2010-01-10 05:43')],
['01/10/2010 13:56:01', '%m/%d/%Y %H:%M:%S',
Timestamp('2010-01-10 13:56:01')] # ,
# ['01/10/2010 08:14 PM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 20:14')],
# ['01/10/2010 07:40 AM', '%m/%d/%Y %I:%M %p',
# Timestamp('2010-01-10 07:40')],
# ['01/10/2010 09:12:56 AM', '%m/%d/%Y %I:%M:%S %p',
# Timestamp('2010-01-10 09:12:56')]
]
for s, format, dt in data:
assert to_datetime(s, format=format) == dt
def test_to_datetime_with_non_exact(self):
# GH 10834
tm._skip_if_has_locale()
# 8904
# exact kw
if sys.version_info < (2, 7):
pytest.skip('on python version < 2.7')
s = Series(['19MAY11', 'foobar19MAY11', '19MAY11:00:00:00',
'19MAY11 00:00:00Z'])
result = to_datetime(s, format='%d%b%y', exact=False)
expected = to_datetime(s.str.extract(r'(\d+\w+\d+)', expand=False),
format='%d%b%y')
assert_series_equal(result, expected)
def test_parse_nanoseconds_with_formula(self):
# GH8989
        # truncating the nanoseconds when a format was provided
for v in ["2012-01-01 09:00:00.000000001",
"2012-01-01 09:00:00.000001",
"2012-01-01 09:00:00.001",
"2012-01-01 09:00:00.001000",
"2012-01-01 09:00:00.001000000", ]:
expected = pd.to_datetime(v)
result = pd.to_datetime(v, format="%Y-%m-%d %H:%M:%S.%f")
assert result == expected
def test_to_datetime_format_weeks(self):
data = [
['2009324', '%Y%W%w', Timestamp('2009-08-13')],
['2013020', '%Y%U%w', Timestamp('2013-01-13')]
]
for s, format, dt in data:
assert to_datetime(s, format=format) == dt
class TestToDatetime(object):
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
assert pd.to_datetime(dt) == Timestamp(dt)
oob_dts = [np.datetime64('1000-01-01'), np.datetime64('5000-01-02'), ]
for dt in oob_dts:
pytest.raises(ValueError, pd.to_datetime, dt, errors='raise')
pytest.raises(ValueError, Timestamp, dt)
assert pd.to_datetime(dt, errors='coerce') is NaT
def test_to_datetime_array_of_dt64s(self):
dts = [np.datetime64('2000-01-01'), np.datetime64('2000-01-02'), ]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
tm.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
pytest.raises(ValueError, pd.to_datetime, dts_with_oob,
errors='raise')
tm.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, errors='coerce'),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
tslib.iNaT,
],
dtype='M8'
)
)
# With errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
tm.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, errors='ignore'),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_to_datetime_tz(self):
# xref 8260
# uniform returns a DatetimeIndex
arr = [pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')]
result = pd.to_datetime(arr)
expected = DatetimeIndex(
['2013-01-01 13:00:00', '2013-01-02 14:00:00'], tz='US/Pacific')
tm.assert_index_equal(result, expected)
# mixed tzs will raise
arr = [pd.Timestamp('2013-01-01 13:00:00', tz='US/Pacific'),
pd.Timestamp('2013-01-02 14:00:00', tz='US/Eastern')]
pytest.raises(ValueError, lambda: pd.to_datetime(arr))
def test_to_datetime_tz_pytz(self):
# see gh-8260
us_eastern = pytz.timezone('US/Eastern')
arr = np.array([us_eastern.localize(datetime(year=2000, month=1, day=1,
hour=3, minute=0)),
us_eastern.localize(datetime(year=2000, month=6, day=1,
hour=3, minute=0))],
dtype=object)
result = pd.to_datetime(arr, utc=True)
expected = DatetimeIndex(['2000-01-01 08:00:00+00:00',
'2000-06-01 07:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("init_constructor, end_constructor, test_method",
[(Index, DatetimeIndex, tm.assert_index_equal),
(list, DatetimeIndex, tm.assert_index_equal),
(np.array, DatetimeIndex, tm.assert_index_equal),
(Series, Series, tm.assert_series_equal)])
def test_to_datetime_utc_true(self,
init_constructor,
end_constructor,
test_method):
# See gh-11934 & gh-6415
data = ['20100102 121314', '20100102 121315']
expected_data = [pd.Timestamp('2010-01-02 12:13:14', tz='utc'),
pd.Timestamp('2010-01-02 12:13:15', tz='utc')]
result = pd.to_datetime(init_constructor(data),
format='%Y%m%d %H%M%S',
utc=True)
expected = end_constructor(expected_data)
test_method(result, expected)
# Test scalar case as well
for scalar, expected in zip(data, expected_data):
result = pd.to_datetime(scalar, format='%Y%m%d %H%M%S', utc=True)
assert result == expected
def test_to_datetime_utc_true_with_series_single_value(self):
# GH 15760 UTC=True with Series
ts = 1.5e18
result = pd.to_datetime(pd.Series([ts]), utc=True)
expected = pd.Series([pd.Timestamp(ts, tz='utc')])
tm.assert_series_equal(result, expected)
def test_to_datetime_utc_true_with_series_tzaware_string(self):
ts = '2013-01-01 00:00:00-01:00'
expected_ts = '2013-01-01 01:00:00'
data = pd.Series([ts] * 3)
result = pd.to_datetime(data, utc=True)
expected = pd.Series([pd.Timestamp(expected_ts, tz='utc')] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('date, dtype',
[('2013-01-01 01:00:00', 'datetime64[ns]'),
('2013-01-01 01:00:00', 'datetime64[ns, UTC]')])
def test_to_datetime_utc_true_with_series_datetime_ns(self, date, dtype):
expected = pd.Series([pd.Timestamp('2013-01-01 01:00:00', tz='UTC')])
result = pd.to_datetime(pd.Series([date], dtype=dtype), utc=True)
tm.assert_series_equal(result, expected)
def test_to_datetime_tz_psycopg2(self):
# xref 8260
try:
import psycopg2
except ImportError:
pytest.skip("no psycopg2 installed")
# misc cases
tz1 = psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None)
tz2 = psycopg2.tz.FixedOffsetTimezone(offset=-240, name=None)
arr = np.array([datetime(2000, 1, 1, 3, 0, tzinfo=tz1),
datetime(2000, 6, 1, 3, 0, tzinfo=tz2)],
dtype=object)
result = pd.to_datetime(arr, errors='coerce', utc=True)
expected = DatetimeIndex(['2000-01-01 08:00:00+00:00',
'2000-06-01 07:00:00+00:00'],
dtype='datetime64[ns, UTC]', freq=None)
tm.assert_index_equal(result, expected)
# dtype coercion
i = pd.DatetimeIndex([
'2000-01-01 08:00:00+00:00'
], tz=psycopg2.tz.FixedOffsetTimezone(offset=-300, name=None))
assert is_datetime64_ns_dtype(i)
        # tz coercion
result = pd.to_datetime(i, errors='coerce')
tm.assert_index_equal(result, i)
result = pd.to_datetime(i, errors='coerce', utc=True)
expected = pd.DatetimeIndex(['2000-01-01 13:00:00'],
dtype='datetime64[ns, UTC]')
tm.assert_index_equal(result, expected)
def test_datetime_bool(self):
# GH13176
with pytest.raises(TypeError):
to_datetime(False)
assert to_datetime(False, errors="coerce") is NaT
assert to_datetime(False, errors="ignore") is False
with pytest.raises(TypeError):
to_datetime(True)
assert to_datetime(True, errors="coerce") is NaT
assert to_datetime(True, errors="ignore") is True
with pytest.raises(TypeError):
to_datetime([False, datetime.today()])
with pytest.raises(TypeError):
to_datetime(['20130101', True])
tm.assert_index_equal(to_datetime([0, False, NaT, 0.0],
errors="coerce"),
DatetimeIndex([to_datetime(0), NaT,
NaT, to_datetime(0)]))
def test_datetime_invalid_datatype(self):
# GH13176
with pytest.raises(TypeError):
pd.to_datetime(bool)
with pytest.raises(TypeError):
pd.to_datetime(pd.to_datetime)
@pytest.mark.parametrize('date, format',
[('2017-20', '%Y-%W'),
('20 Sunday', '%W %A'),
('20 Sun', '%W %a'),
('2017-21', '%Y-%U'),
('20 Sunday', '%U %A'),
('20 Sun', '%U %a')])
def test_week_without_day_and_calendar_year(self, date, format):
# GH16774
msg = "Cannot use '%W' or '%U' without day and year"
with tm.assert_raises_regex(ValueError, msg):
pd.to_datetime(date, format=format)
class TestToDatetimeUnit(object):
def test_unit(self):
# GH 11758
        # test proper behavior with errors
with pytest.raises(ValueError):
to_datetime([1], unit='D', format='%Y%m%d')
values = [11111111, 1, 1.0, tslib.iNaT, NaT, np.nan,
'NaT', '']
result = to_datetime(values, unit='D', errors='ignore')
expected = Index([11111111, Timestamp('1970-01-02'),
Timestamp('1970-01-02'), NaT,
NaT, NaT, NaT, NaT],
dtype=object)
tm.assert_index_equal(result, expected)
result = to_datetime(values, unit='D', errors='coerce')
expected = DatetimeIndex(['NaT', '1970-01-02', '1970-01-02',
'NaT', 'NaT', 'NaT', 'NaT', 'NaT'])
tm.assert_index_equal(result, expected)
with pytest.raises(tslib.OutOfBoundsDatetime):
to_datetime(values, unit='D', errors='raise')
values = [1420043460000, tslib.iNaT, NaT, np.nan, 'NaT']
result = to_datetime(values, errors='ignore', unit='s')
expected = Index([1420043460000, NaT, NaT,
NaT, NaT], dtype=object)
tm.assert_index_equal(result, expected)
result = to_datetime(values, errors='coerce', unit='s')
expected = DatetimeIndex(['NaT', 'NaT', 'NaT', 'NaT', 'NaT'])
tm.assert_index_equal(result, expected)
with pytest.raises(tslib.OutOfBoundsDatetime):
to_datetime(values, errors='raise', unit='s')
# if we have a string, then we raise a ValueError
# and NOT an OutOfBoundsDatetime
for val in ['foo', Timestamp('20130101')]:
try:
to_datetime(val, errors='raise', unit='s')
except tslib.OutOfBoundsDatetime:
raise AssertionError("incorrect exception raised")
except ValueError:
pass
def test_unit_consistency(self):
# consistency of conversions
expected = Timestamp('1970-05-09 14:25:11')
result = pd.to_datetime(11111111, unit='s', errors='raise')
assert result == expected
assert isinstance(result, Timestamp)
result = pd.to_datetime(11111111, unit='s', errors='coerce')
assert result == expected
assert isinstance(result, Timestamp)
result = pd.to_datetime(11111111, unit='s', errors='ignore')
assert result == expected
assert isinstance(result, Timestamp)
def test_unit_with_numeric(self):
# GH 13180
# coercions from floats/ints are ok
expected = DatetimeIndex(['2015-06-19 05:33:20',
'2015-05-27 22:33:20'])
arr1 = [1.434692e+18, 1.432766e+18]
arr2 = np.array(arr1).astype('int64')
for errors in ['ignore', 'raise', 'coerce']:
result = pd.to_datetime(arr1, errors=errors)
tm.assert_index_equal(result, expected)
result = pd.to_datetime(arr2, errors=errors)
tm.assert_index_equal(result, expected)
# but we want to make sure that we are coercing
# if we have ints/strings
expected = DatetimeIndex(['NaT',
'2015-06-19 05:33:20',
'2015-05-27 22:33:20'])
arr = ['foo', 1.434692e+18, 1.432766e+18]
result = pd.to_datetime(arr, errors='coerce')
tm.assert_index_equal(result, expected)
expected = DatetimeIndex(['2015-06-19 05:33:20',
'2015-05-27 22:33:20',
'NaT',
'NaT'])
arr = [1.434692e+18, 1.432766e+18, 'foo', 'NaT']
result = pd.to_datetime(arr, errors='coerce')
tm.assert_index_equal(result, expected)
def test_unit_mixed(self):
# mixed integers/datetimes
expected = DatetimeIndex(['2013-01-01', 'NaT', 'NaT'])
arr = [pd.Timestamp('20130101'), 1.434692e+18, 1.432766e+18]
result = pd.to_datetime(arr, errors='coerce')
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError):
pd.to_datetime(arr, errors='raise')
expected = DatetimeIndex(['NaT',
'NaT',
'2013-01-01'])
arr = [1.434692e+18, 1.432766e+18, pd.Timestamp('20130101')]
result = pd.to_datetime(arr, errors='coerce')
tm.assert_index_equal(result, expected)
with pytest.raises(ValueError):
pd.to_datetime(arr, errors='raise')
def test_dataframe(self):
df = DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5],
'hour': [6, 7],
'minute': [58, 59],
'second': [10, 11],
'ms': [1, 1],
'us': [2, 2],
'ns': [3, 3]})
result = to_datetime({'year': df['year'],
'month': df['month'],
'day': df['day']})
expected = Series([Timestamp('20150204 00:00:00'),
Timestamp('20160305 00:0:00')])
assert_series_equal(result, expected)
# dict-like
result = to_datetime(df[['year', 'month', 'day']].to_dict())
assert_series_equal(result, expected)
# dict but with constructable
df2 = df[['year', 'month', 'day']].to_dict()
df2['month'] = 2
result = to_datetime(df2)
expected2 = Series([Timestamp('20150204 00:00:00'),
Timestamp('20160205 00:0:00')])
assert_series_equal(result, expected2)
# unit mappings
units = [{'year': 'years',
'month': 'months',
'day': 'days',
'hour': 'hours',
'minute': 'minutes',
'second': 'seconds'},
{'year': 'year',
'month': 'month',
'day': 'day',
'hour': 'hour',
'minute': 'minute',
'second': 'second'},
]
for d in units:
result = to_datetime(df[list(d.keys())].rename(columns=d))
expected = Series([Timestamp('20150204 06:58:10'),
Timestamp('20160305 07:59:11')])
assert_series_equal(result, expected)
d = {'year': 'year',
'month': 'month',
'day': 'day',
'hour': 'hour',
'minute': 'minute',
'second': 'second',
'ms': 'ms',
'us': 'us',
'ns': 'ns'}
result = to_datetime(df.rename(columns=d))
expected = Series([Timestamp('20150204 06:58:10.001002003'),
Timestamp('20160305 07:59:11.001002003')])
assert_series_equal(result, expected)
# coerce back to int
result = to_datetime(df.astype(str))
assert_series_equal(result, expected)
# passing coerce
df2 = DataFrame({'year': [2015, 2016],
'month': [2, 20],
'day': [4, 5]})
msg = ("cannot assemble the datetimes: time data .+ does not "
"match format '%Y%m%d' \(match\)")
with tm.assert_raises_regex(ValueError, msg):
to_datetime(df2)
result = to_datetime(df2, errors='coerce')
expected = Series([Timestamp('20150204 00:00:00'),
NaT])
assert_series_equal(result, expected)
# extra columns
msg = ("extra keys have been passed to the datetime assemblage: "
"\[foo\]")
with tm.assert_raises_regex(ValueError, msg):
df2 = df.copy()
df2['foo'] = 1
to_datetime(df2)
# not enough
msg = ('to assemble mappings requires at least that \[year, month, '
'day\] be specified: \[.+\] is missing')
for c in [['year'],
['year', 'month'],
['year', 'month', 'second'],
['month', 'day'],
['year', 'day', 'second']]:
with tm.assert_raises_regex(ValueError, msg):
to_datetime(df[c])
# duplicates
msg = 'cannot assemble with duplicate keys'
df2 = DataFrame({'year': [2015, 2016],
'month': [2, 20],
'day': [4, 5]})
df2.columns = ['year', 'year', 'day']
with tm.assert_raises_regex(ValueError, msg):
to_datetime(df2)
df2 = DataFrame({'year': [2015, 2016],
'month': [2, 20],
'day': [4, 5],
'hour': [4, 5]})
df2.columns = ['year', 'month', 'day', 'day']
with tm.assert_raises_regex(ValueError, msg):
to_datetime(df2)
def test_dataframe_dtypes(self):
# #13451
df = DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
# int16
result = to_datetime(df.astype('int16'))
expected = Series([Timestamp('20150204 00:00:00'),
Timestamp('20160305 00:00:00')])
assert_series_equal(result, expected)
# mixed dtypes
df['month'] = df['month'].astype('int8')
df['day'] = df['day'].astype('int8')
result = to_datetime(df)
expected = Series([Timestamp('20150204 00:00:00'),
Timestamp('20160305 00:00:00')])
assert_series_equal(result, expected)
# float
df = DataFrame({'year': [2000, 2001],
'month': [1.5, 1],
'day': [1, 1]})
with pytest.raises(ValueError):
to_datetime(df)
class TestToDatetimeMisc(object):
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = idx.to_datetime()
expected = DatetimeIndex(pd.to_datetime(idx.values))
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
tm.assert_index_equal(result, expected)
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
assert result[0] == exp
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
assert result[0] == exp
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
assert rs == xp
# dayfirst is essentially broken
# to_datetime('01-13-2012', dayfirst=True)
# pytest.raises(ValueError, to_datetime('01-13-2012',
# dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
assert result[0] == s[0]
def test_to_datetime_with_space_in_series(self):
# GH 6428
s = Series(['10/18/2006', '10/18/2008', ' '])
pytest.raises(ValueError, lambda: to_datetime(s, errors='raise'))
result_coerce = to_datetime(s, errors='coerce')
expected_coerce = Series([datetime(2006, 10, 18),
datetime(2008, 10, 18),
NaT])
tm.assert_series_equal(result_coerce, expected_coerce)
result_ignore = to_datetime(s, errors='ignore')
tm.assert_series_equal(result_ignore, s)
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
tm._skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1, 2, 3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1, 2, 3])
pytest.raises(ValueError,
lambda: pd.to_datetime(td, format='%b %y',
errors='raise'))
pytest.raises(ValueError,
lambda: td.apply(pd.to_datetime, format='%b %y',
errors='raise'))
expected = pd.to_datetime(td, format='%b %y', errors='coerce')
result = td.apply(
lambda x: pd.to_datetime(x, format='%b %y', errors='coerce'))
assert_series_equal(result, expected)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
assert result is NaT
result = to_datetime(['', ''])
assert isna(result).all()
# ints
result = Timestamp(0)
expected = to_datetime(0)
assert result == expected
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
assert result == expected
# array = ['2012','20120101','20120101 12:01:01']
array = ['20120101', '20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp, array)
tm.assert_almost_equal(result, expected)
# currently fails ###
# result = Timestamp('2012')
# expected = to_datetime('2012')
# assert result == expected
def test_to_datetime_unprocessable_input(self):
# GH 4928
tm.assert_numpy_array_equal(
to_datetime([1, '1'], errors='ignore'),
np.array([1, '1'], dtype='O')
)
pytest.raises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
assert index[0] == scalar.astype('O')
value = Timestamp(scalar)
assert value == as_obj
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
tm.assert_index_equal(rng, result)
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
assert xp.freq == rs.freq
assert xp.tzinfo == rs.tzinfo
def test_to_datetime_overflow(self):
# gh-17637
# we are overflowing Timedelta range here
with pytest.raises(OverflowError):
date_range(start='1/1/1700', freq='B', periods=100000)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if isna(val):
expected[i] = tslib.iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
tm.assert_almost_equal(result, expected)
result2 = to_datetime(strings)
assert isinstance(result2, DatetimeIndex)
tm.assert_numpy_array_equal(result, result2.values)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
# GH 10636, default is now 'raise'
pytest.raises(ValueError,
lambda: to_datetime(malformed, errors='raise'))
result = to_datetime(malformed, errors='ignore')
tm.assert_numpy_array_equal(result, malformed)
pytest.raises(ValueError, to_datetime, malformed, errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isna(x):
expected[i] = tslib.iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected, check_names=False)
assert result.name == 'foo'
assert_series_equal(dresult, expected, check_names=False)
assert dresult.name == 'foo'
def test_dti_constructor_numpy_timeunits(self):
# GH 9114
base = pd.to_datetime(['2000-01-01T00:00', '2000-01-02T00:00', 'NaT'])
for dtype in ['datetime64[h]', 'datetime64[m]', 'datetime64[s]',
'datetime64[ms]', 'datetime64[us]', 'datetime64[ns]']:
values = base.values.astype(dtype)
tm.assert_index_equal(DatetimeIndex(values), base)
tm.assert_index_equal(to_datetime(values), base)
def test_dayfirst(self):
# GH 5917
arr = ['10/02/2014', '11/02/2014', '12/02/2014']
expected = DatetimeIndex([datetime(2014, 2, 10), datetime(2014, 2, 11),
datetime(2014, 2, 12)])
idx1 = DatetimeIndex(arr, dayfirst=True)
idx2 = DatetimeIndex(np.array(arr), dayfirst=True)
idx3 = to_datetime(arr, dayfirst=True)
idx4 = to_datetime(np.array(arr), dayfirst=True)
idx5 = DatetimeIndex(Index(arr), dayfirst=True)
idx6 = DatetimeIndex(Series(arr), dayfirst=True)
tm.assert_index_equal(expected, idx1)
tm.assert_index_equal(expected, idx2)
tm.assert_index_equal(expected, idx3)
tm.assert_index_equal(expected, idx4)
tm.assert_index_equal(expected, idx5)
tm.assert_index_equal(expected, idx6)
class TestGuessDatetimeFormat(object):
def test_guess_datetime_format_with_parseable_formats(self):
tm._skip_if_not_us_locale()
dt_string_to_format = (('20111230', '%Y%m%d'),
('2011-12-30', '%Y-%m-%d'),
('30-12-2011', '%d-%m-%Y'),
('2011-12-30 00:00:00', '%Y-%m-%d %H:%M:%S'),
('2011-12-30T00:00:00', '%Y-%m-%dT%H:%M:%S'),
('2011-12-30 00:00:00.000000',
'%Y-%m-%d %H:%M:%S.%f'), )
for dt_string, dt_format in dt_string_to_format:
assert tools._guess_datetime_format(dt_string) == dt_format
def test_guess_datetime_format_with_dayfirst(self):
ambiguous_string = '01/01/2011'
assert tools._guess_datetime_format(
ambiguous_string, dayfirst=True) == '%d/%m/%Y'
assert tools._guess_datetime_format(
ambiguous_string, dayfirst=False) == '%m/%d/%Y'
def test_guess_datetime_format_with_locale_specific_formats(self):
# The month names will vary depending on the locale, in which
        # case these won't be parsed properly (dateutil can't parse them)
tm._skip_if_has_locale()
dt_string_to_format = (('30/Dec/2011', '%d/%b/%Y'),
('30/December/2011', '%d/%B/%Y'),
('30/Dec/2011 00:00:00', '%d/%b/%Y %H:%M:%S'), )
for dt_string, dt_format in dt_string_to_format:
assert tools._guess_datetime_format(dt_string) == dt_format
def test_guess_datetime_format_invalid_inputs(self):
# A datetime string must include a year, month and a day for it
# to be guessable, in addition to being a string that looks like
# a datetime
invalid_dts = [
'2013',
'01/2013',
'12:00:00',
'1/1/1/1',
'this_is_not_a_datetime',
'51a',
9,
datetime(2011, 1, 1),
]
for invalid_dt in invalid_dts:
assert tools._guess_datetime_format(invalid_dt) is None
def test_guess_datetime_format_nopadding(self):
# GH 11142
dt_string_to_format = (('2011-1-1', '%Y-%m-%d'),
('30-1-2011', '%d-%m-%Y'),
('1/1/2011', '%m/%d/%Y'),
('2011-1-1 00:00:00', '%Y-%m-%d %H:%M:%S'),
('2011-1-1 0:0:0', '%Y-%m-%d %H:%M:%S'),
('2011-1-3T00:00:0', '%Y-%m-%dT%H:%M:%S'))
for dt_string, dt_format in dt_string_to_format:
assert tools._guess_datetime_format(dt_string) == dt_format
def test_guess_datetime_format_for_array(self):
tm._skip_if_not_us_locale()
expected_format = '%Y-%m-%d %H:%M:%S.%f'
dt_string = datetime(2011, 12, 30, 0, 0, 0).strftime(expected_format)
test_arrays = [
np.array([dt_string, dt_string, dt_string], dtype='O'),
np.array([np.nan, np.nan, dt_string], dtype='O'),
np.array([dt_string, 'random_string'], dtype='O'),
]
for test_array in test_arrays:
assert tools._guess_datetime_format_for_array(
test_array) == expected_format
format_for_string_of_nans = tools._guess_datetime_format_for_array(
np.array(
[np.nan, np.nan, np.nan], dtype='O'))
assert format_for_string_of_nans is None
class TestToDatetimeInferFormat(object):
def test_to_datetime_infer_datetime_format_consistent_format(self):
s = pd.Series(pd.date_range('20000101', periods=50, freq='H'))
test_formats = ['%m-%d-%Y', '%m/%d/%Y %H:%M:%S.%f',
'%Y-%m-%dT%H:%M:%S.%f']
for test_format in test_formats:
s_as_dt_strings = s.apply(lambda x: x.strftime(test_format))
with_format = pd.to_datetime(s_as_dt_strings, format=test_format)
no_infer = pd.to_datetime(s_as_dt_strings,
infer_datetime_format=False)
yes_infer = pd.to_datetime(s_as_dt_strings,
infer_datetime_format=True)
            # Whether the format is explicitly passed, inferred, or not
            # inferred at all, the results should all be the same
tm.assert_series_equal(with_format, no_infer)
tm.assert_series_equal(no_infer, yes_infer)
def test_to_datetime_infer_datetime_format_inconsistent_format(self):
s = pd.Series(np.array(['01/01/2011 00:00:00',
'01-02-2011 00:00:00',
'2011-01-03T00:00:00']))
# When the format is inconsistent, infer_datetime_format should just
# fallback to the default parsing
tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False),
pd.to_datetime(s, infer_datetime_format=True))
s = pd.Series(np.array(['Jan/01/2011', 'Feb/01/2011', 'Mar/01/2011']))
tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False),
pd.to_datetime(s, infer_datetime_format=True))
def test_to_datetime_infer_datetime_format_series_with_nans(self):
s = pd.Series(np.array(['01/01/2011 00:00:00', np.nan,
'01/03/2011 00:00:00', np.nan]))
tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False),
pd.to_datetime(s, infer_datetime_format=True))
def test_to_datetime_infer_datetime_format_series_starting_with_nans(self):
s = pd.Series(np.array([np.nan, np.nan, '01/01/2011 00:00:00',
'01/02/2011 00:00:00', '01/03/2011 00:00:00']))
tm.assert_series_equal(pd.to_datetime(s, infer_datetime_format=False),
pd.to_datetime(s, infer_datetime_format=True))
def test_to_datetime_iso8601_noleading_0s(self):
# GH 11871
s = pd.Series(['2014-1-1', '2014-2-2', '2015-3-3'])
expected = pd.Series([pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-02-02'),
pd.Timestamp('2015-03-03')])
tm.assert_series_equal(pd.to_datetime(s), expected)
tm.assert_series_equal(pd.to_datetime(s, format='%Y-%m-%d'), expected)
class TestDaysInMonth(object):
# tests for issue #10154
def test_day_not_in_month_coerce(self):
assert isna(to_datetime('2015-02-29', errors='coerce'))
assert isna(to_datetime('2015-02-29', format="%Y-%m-%d",
errors='coerce'))
assert isna(to_datetime('2015-02-32', format="%Y-%m-%d",
errors='coerce'))
assert isna(to_datetime('2015-04-31', format="%Y-%m-%d",
errors='coerce'))
def test_day_not_in_month_raise(self):
pytest.raises(ValueError, to_datetime, '2015-02-29',
errors='raise')
pytest.raises(ValueError, to_datetime, '2015-02-29',
errors='raise', format="%Y-%m-%d")
pytest.raises(ValueError, to_datetime, '2015-02-32',
errors='raise', format="%Y-%m-%d")
pytest.raises(ValueError, to_datetime, '2015-04-31',
errors='raise', format="%Y-%m-%d")
def test_day_not_in_month_ignore(self):
assert to_datetime('2015-02-29', errors='ignore') == '2015-02-29'
assert to_datetime('2015-02-29', errors='ignore',
format="%Y-%m-%d") == '2015-02-29'
assert to_datetime('2015-02-32', errors='ignore',
format="%Y-%m-%d") == '2015-02-32'
assert to_datetime('2015-04-31', errors='ignore',
format="%Y-%m-%d") == '2015-04-31'
class TestDatetimeParsingWrappers(object):
def test_does_not_convert_mixed_integer(self):
bad_date_strings = ('-50000', '999', '123.1234', 'm', 'T')
for bad_date_string in bad_date_strings:
assert not parsing._does_string_look_like_datetime(bad_date_string)
good_date_strings = ('2012-01-01',
'01/01/2012',
'Mon Sep 16, 2013',
'01012012',
'0101',
'1-1', )
for good_date_string in good_date_strings:
assert parsing._does_string_look_like_datetime(good_date_string)
def test_parsers(self):
# https://github.com/dateutil/dateutil/issues/217
import dateutil
yearfirst = dateutil.__version__ >= LooseVersion('2.5.0')
cases = {'2011-01-01': datetime(2011, 1, 1),
'2Q2005': datetime(2005, 4, 1),
'2Q05': datetime(2005, 4, 1),
'2005Q1': datetime(2005, 1, 1),
'05Q1': datetime(2005, 1, 1),
'2011Q3': datetime(2011, 7, 1),
'11Q3': datetime(2011, 7, 1),
'3Q2011': datetime(2011, 7, 1),
'3Q11': datetime(2011, 7, 1),
# quarterly without space
'2000Q4': datetime(2000, 10, 1),
'00Q4': datetime(2000, 10, 1),
'4Q2000': datetime(2000, 10, 1),
'4Q00': datetime(2000, 10, 1),
'2000q4': datetime(2000, 10, 1),
'2000-Q4': datetime(2000, 10, 1),
'00-Q4': datetime(2000, 10, 1),
'4Q-2000': datetime(2000, 10, 1),
'4Q-00': datetime(2000, 10, 1),
'00q4': datetime(2000, 10, 1),
'2005': datetime(2005, 1, 1),
'2005-11': datetime(2005, 11, 1),
'2005 11': datetime(2005, 11, 1),
'11-2005': datetime(2005, 11, 1),
'11 2005': datetime(2005, 11, 1),
'200511': datetime(2020, 5, 11),
'20051109': datetime(2005, 11, 9),
'20051109 10:15': datetime(2005, 11, 9, 10, 15),
'20051109 08H': datetime(2005, 11, 9, 8, 0),
'2005-11-09 10:15': datetime(2005, 11, 9, 10, 15),
'2005-11-09 08H': datetime(2005, 11, 9, 8, 0),
'2005/11/09 10:15': datetime(2005, 11, 9, 10, 15),
'2005/11/09 08H': datetime(2005, 11, 9, 8, 0),
"Thu Sep 25 10:36:28 2003": datetime(2003, 9, 25, 10,
36, 28),
"Thu Sep 25 2003": datetime(2003, 9, 25),
"Sep 25 2003": datetime(2003, 9, 25),
"January 1 2014": datetime(2014, 1, 1),
# GH 10537
'2014-06': datetime(2014, 6, 1),
'06-2014': datetime(2014, 6, 1),
'2014-6': datetime(2014, 6, 1),
'6-2014': datetime(2014, 6, 1),
'20010101 12': datetime(2001, 1, 1, 12),
'20010101 1234': datetime(2001, 1, 1, 12, 34),
'20010101 123456': datetime(2001, 1, 1, 12, 34, 56),
}
for date_str, expected in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str,
yearfirst=yearfirst)
result2 = to_datetime(date_str, yearfirst=yearfirst)
result3 = to_datetime([date_str], yearfirst=yearfirst)
# result5 is used below
result4 = to_datetime(np.array([date_str], dtype=object),
yearfirst=yearfirst)
result6 = DatetimeIndex([date_str], yearfirst=yearfirst)
# result7 is used below
result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst)
result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst)
for res in [result1, result2]:
assert res == expected
for res in [result3, result4, result6, result8, result9]:
exp = DatetimeIndex([pd.Timestamp(expected)])
tm.assert_index_equal(res, exp)
            # these really need to have yearfirst, but we don't support it
if not yearfirst:
result5 = Timestamp(date_str)
assert result5 == expected
result7 = date_range(date_str, freq='S', periods=1,
yearfirst=yearfirst)
assert result7 == expected
# NaT
result1, _, _ = tools.parse_time_string('NaT')
result2 = to_datetime('NaT')
result3 = Timestamp('NaT')
result4 = DatetimeIndex(['NaT'])[0]
assert result1 is tslib.NaT
assert result2 is tslib.NaT
assert result3 is tslib.NaT
assert result4 is tslib.NaT
def test_parsers_quarter_invalid(self):
cases = ['2Q 2005', '2Q-200A', '2Q-200', '22Q2005', '6Q-20', '2Q200.']
for case in cases:
pytest.raises(ValueError, tools.parse_time_string, case)
def test_parsers_dayfirst_yearfirst(self):
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# bug fix in 2.5.2
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# revert of bug in 2.5.2
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must be in 1..12
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
is_lt_253 = dateutil.__version__ < LooseVersion('2.5.3')
# str : dayfirst, yearfirst, expected
cases = {'10-11-12': [(False, False,
datetime(2012, 10, 11)),
(True, False,
datetime(2012, 11, 10)),
(False, True,
datetime(2010, 11, 12)),
(True, True,
datetime(2010, 12, 11))],
'20/12/21': [(False, False,
datetime(2021, 12, 20)),
(True, False,
datetime(2021, 12, 20)),
(False, True,
datetime(2020, 12, 21)),
(True, True,
datetime(2020, 12, 21))]}
for date_str, values in compat.iteritems(cases):
for dayfirst, yearfirst, expected in values:
                # odd comparisons across versions,
                # so let's just skip this case
if dayfirst and yearfirst and is_lt_253:
continue
# compare with dateutil result
dateutil_result = parse(date_str, dayfirst=dayfirst,
yearfirst=yearfirst)
assert dateutil_result == expected
result1, _, _ = tools.parse_time_string(date_str,
dayfirst=dayfirst,
yearfirst=yearfirst)
# we don't support dayfirst/yearfirst here:
if not dayfirst and not yearfirst:
result2 = Timestamp(date_str)
assert result2 == expected
result3 = to_datetime(date_str, dayfirst=dayfirst,
yearfirst=yearfirst)
result4 = DatetimeIndex([date_str], dayfirst=dayfirst,
yearfirst=yearfirst)[0]
assert result1 == expected
assert result3 == expected
assert result4 == expected
def test_parsers_timestring(self):
# must be the same as dateutil result
cases = {'10:15': (parse('10:15'), datetime(1, 1, 1, 10, 15)),
'9:05': (parse('9:05'), datetime(1, 1, 1, 9, 5))}
for date_str, (exp_now, exp_def) in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = Timestamp(date_str)
result5 = DatetimeIndex([date_str])[0]
            # parse_time_string() anchors the parsed time to the default date
            # (year 1); the other entry points use today's date, and that
            # cannot be changed because it is relied upon in time series plots
assert result1 == exp_def
assert result2 == exp_now
assert result3 == exp_now
assert result4 == exp_now
assert result5 == exp_now
def test_parsers_time(self):
# GH11818
_skip_if_has_locale()
strings = ["14:15", "1415", "2:15pm", "0215pm", "14:15:00", "141500",
"2:15:00pm", "021500pm", time(14, 15)]
expected = time(14, 15)
for time_string in strings:
assert tools.to_time(time_string) == expected
new_string = "14.15"
pytest.raises(ValueError, tools.to_time, new_string)
assert tools.to_time(new_string, format="%H.%M") == expected
arg = ["14:15", "20:20"]
expected_arr = [time(14, 15), time(20, 20)]
assert tools.to_time(arg) == expected_arr
assert tools.to_time(arg, format="%H:%M") == expected_arr
assert tools.to_time(arg, infer_time_format=True) == expected_arr
assert tools.to_time(arg, format="%I:%M%p",
errors="coerce") == [None, None]
res = tools.to_time(arg, format="%I:%M%p", errors="ignore")
tm.assert_numpy_array_equal(res, np.array(arg, dtype=np.object_))
with pytest.raises(ValueError):
tools.to_time(arg, format="%I:%M%p", errors="raise")
tm.assert_series_equal(tools.to_time(Series(arg, name="test")),
Series(expected_arr, name="test"))
res = tools.to_time(np.array(arg))
assert isinstance(res, list)
assert res == expected_arr
def test_parsers_monthfreq(self):
cases = {'201101': datetime(2011, 1, 1, 0, 0),
'200005': datetime(2000, 5, 1, 0, 0)}
for date_str, expected in compat.iteritems(cases):
result1, _, _ = tools.parse_time_string(date_str, freq='M')
assert result1 == expected
def test_parsers_quarterly_with_freq(self):
msg = ('Incorrect quarterly string is given, quarter '
'must be between 1 and 4: 2013Q5')
with tm.assert_raises_regex(parsing.DateParseError, msg):
tools.parse_time_string('2013Q5')
# GH 5418
msg = ('Unable to retrieve month information from given freq: '
'INVLD-L-DEC-SAT')
with tm.assert_raises_regex(parsing.DateParseError, msg):
tools.parse_time_string('2013Q1', freq='INVLD-L-DEC-SAT')
cases = {('2013Q2', None): datetime(2013, 4, 1),
('2013Q2', 'A-APR'): datetime(2012, 8, 1),
('2013-Q2', 'A-DEC'): datetime(2013, 4, 1)}
for (date_str, freq), exp in compat.iteritems(cases):
result, _, _ = tools.parse_time_string(date_str, freq=freq)
assert result == exp
def test_parsers_timezone_minute_offsets_roundtrip(self):
# GH11708
base = to_datetime("2013-01-01 00:00:00")
dt_strings = [
('2013-01-01 05:45+0545',
"Asia/Katmandu",
"Timestamp('2013-01-01 05:45:00+0545', tz='Asia/Katmandu')"),
('2013-01-01 05:30+0530',
"Asia/Kolkata",
"Timestamp('2013-01-01 05:30:00+0530', tz='Asia/Kolkata')")
]
for dt_string, tz, dt_string_repr in dt_strings:
dt_time = to_datetime(dt_string)
assert base == dt_time
converted_time = dt_time.tz_localize('UTC').tz_convert(tz)
assert dt_string_repr == repr(converted_time)
def test_parsers_iso8601(self):
# GH 12060
        # test only the iso parser - flexibility to different
        # separators and leading 0s
# Timestamp construction falls back to dateutil
cases = {'2011-01-02': datetime(2011, 1, 2),
'2011-1-2': datetime(2011, 1, 2),
'2011-01': datetime(2011, 1, 1),
'2011-1': datetime(2011, 1, 1),
'2011 01 02': datetime(2011, 1, 2),
'2011.01.02': datetime(2011, 1, 2),
'2011/01/02': datetime(2011, 1, 2),
'2011\\01\\02': datetime(2011, 1, 2),
'2013-01-01 05:30:00': datetime(2013, 1, 1, 5, 30),
'2013-1-1 5:30:00': datetime(2013, 1, 1, 5, 30)}
for date_str, exp in compat.iteritems(cases):
actual = tslib._test_parse_iso8601(date_str)
assert actual == exp
        # separators must all match - YYYYMM is not valid
invalid_cases = ['2011-01/02', '2011^11^11',
'201401', '201111', '200101',
# mixed separated and unseparated
'2005-0101', '200501-01',
'20010101 12:3456', '20010101 1234:56',
# HHMMSS must have two digits in each component
# if unseparated
'20010101 1', '20010101 123', '20010101 12345',
'20010101 12345Z',
# wrong separator for HHMMSS
'2001-01-01 12-34-56']
for date_str in invalid_cases:
with pytest.raises(ValueError):
tslib._test_parse_iso8601(date_str)
                # If no ValueError was raised, surface which case failed.
raise Exception(date_str)
class TestArrayToDatetime(object):
def test_try_parse_dates(self):
arr = np.array(['5/1/2000', '6/1/2000', '7/1/2000'], dtype=object)
result = parsing.try_parse_dates(arr, dayfirst=True)
expected = [parse(d, dayfirst=True) for d in arr]
assert np.array_equal(result, expected)
def test_parsing_valid_dates(self):
arr = np.array(['01-01-2013', '01-02-2013'], dtype=object)
tm.assert_numpy_array_equal(
tslib.array_to_datetime(arr),
np_array_datetime64_compat(
[
'2013-01-01T00:00:00.000000000-0000',
'2013-01-02T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
arr = np.array(['Mon Sep 16 2013', 'Tue Sep 17 2013'], dtype=object)
tm.assert_numpy_array_equal(
tslib.array_to_datetime(arr),
np_array_datetime64_compat(
[
'2013-09-16T00:00:00.000000000-0000',
'2013-09-17T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
def test_parsing_timezone_offsets(self):
# All of these datetime strings with offsets are equivalent
# to the same datetime after the timezone offset is added
dt_strings = [
'01-01-2013 08:00:00+08:00',
'2013-01-01T08:00:00.000000000+0800',
'2012-12-31T16:00:00.000000000-0800',
'12-31-2012 23:00:00-01:00'
]
expected_output = tslib.array_to_datetime(np.array(
['01-01-2013 00:00:00'], dtype=object))
for dt_string in dt_strings:
tm.assert_numpy_array_equal(
tslib.array_to_datetime(
np.array([dt_string], dtype=object)
),
expected_output
)
def test_number_looking_strings_not_into_datetime(self):
# #4601
# These strings don't look like datetimes so they shouldn't be
# attempted to be converted
arr = np.array(['-352.737091', '183.575577'], dtype=object)
tm.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='ignore'), arr)
arr = np.array(['1', '2', '3', '4', '5'], dtype=object)
tm.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='ignore'), arr)
def test_coercing_dates_outside_of_datetime64_ns_bounds(self):
invalid_dates = [
date(1000, 1, 1),
datetime(1000, 1, 1),
'1000-01-01',
'Jan 1, 1000',
np.datetime64('1000-01-01'),
]
for invalid_date in invalid_dates:
pytest.raises(ValueError,
tslib.array_to_datetime,
np.array([invalid_date], dtype='object'),
errors='raise', )
tm.assert_numpy_array_equal(
tslib.array_to_datetime(
np.array([invalid_date], dtype='object'),
errors='coerce'),
np.array([tslib.iNaT], dtype='M8[ns]')
)
arr = np.array(['1/1/1000', '1/1/2000'], dtype=object)
tm.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='coerce'),
np_array_datetime64_compat(
[
tslib.iNaT,
'2000-01-01T00:00:00.000000000-0000'
],
dtype='M8[ns]'
)
)
def test_coerce_of_invalid_datetimes(self):
arr = np.array(['01-01-2013', 'not_a_date', '1'], dtype=object)
# Without coercing, the presence of any invalid dates prevents
# any values from being converted
tm.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='ignore'), arr)
        # With coercing, the invalid dates become iNaT
tm.assert_numpy_array_equal(
tslib.array_to_datetime(arr, errors='coerce'),
np_array_datetime64_compat(
[
'2013-01-01T00:00:00.000000000-0000',
tslib.iNaT,
tslib.iNaT
],
dtype='M8[ns]'
)
)
def test_normalize_date():
value = date(2012, 9, 7)
result = normalize_date(value)
assert (result == datetime(2012, 9, 7))
value = datetime(2012, 9, 7, 12)
result = normalize_date(value)
assert (result == datetime(2012, 9, 7))
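# The fixtures below are shared by the TestOrigin cases that exercise
# pd.to_datetime(..., unit=..., origin=...).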
@pytest.fixture(params=['D', 's', 'ms', 'us', 'ns'])
def units(request):
return request.param
@pytest.fixture
def epoch_1960():
# for origin as 1960-01-01
return Timestamp('1960-01-01')
@pytest.fixture
def units_from_epochs():
return list(range(5))
@pytest.fixture(params=[epoch_1960(),
epoch_1960().to_pydatetime(),
epoch_1960().to_datetime64(),
str(epoch_1960())])
def epochs(request):
return request.param
@pytest.fixture
def julian_dates():
return pd.date_range('2014-1-1', periods=10).to_julian_date().values
class TestOrigin(object):
def test_to_basic(self, julian_dates):
# gh-11276, gh-11745
# for origin as julian
result = Series(pd.to_datetime(
julian_dates, unit='D', origin='julian'))
expected = Series(pd.to_datetime(
julian_dates - pd.Timestamp(0).to_julian_date(), unit='D'))
assert_series_equal(result, expected)
result = Series(pd.to_datetime(
[0, 1, 2], unit='D', origin='unix'))
expected = Series([Timestamp('1970-01-01'),
Timestamp('1970-01-02'),
Timestamp('1970-01-03')])
assert_series_equal(result, expected)
# default
result = Series(pd.to_datetime(
[0, 1, 2], unit='D'))
expected = Series([Timestamp('1970-01-01'),
Timestamp('1970-01-02'),
Timestamp('1970-01-03')])
assert_series_equal(result, expected)
def test_julian_round_trip(self):
result = pd.to_datetime(2456658, origin='julian', unit='D')
assert result.to_julian_date() == 2456658
# out-of-bounds
with pytest.raises(ValueError):
pd.to_datetime(1, origin="julian", unit='D')
def test_invalid_unit(self, units, julian_dates):
# checking for invalid combination of origin='julian' and unit != D
if units != 'D':
with pytest.raises(ValueError):
pd.to_datetime(julian_dates, unit=units, origin='julian')
def test_invalid_origin(self):
# need to have a numeric specified
with pytest.raises(ValueError):
pd.to_datetime("2005-01-01", origin="1960-01-01")
with pytest.raises(ValueError):
pd.to_datetime("2005-01-01", origin="1960-01-01", unit='D')
def test_epoch(self, units, epochs, epoch_1960, units_from_epochs):
expected = Series(
[pd.Timedelta(x, unit=units) +
epoch_1960 for x in units_from_epochs])
result = Series(pd.to_datetime(
units_from_epochs, unit=units, origin=epochs))
assert_series_equal(result, expected)
@pytest.mark.parametrize("origin, exc",
[('random_string', ValueError),
('epoch', ValueError),
('13-24-1990', ValueError),
(datetime(1, 1, 1), tslib.OutOfBoundsDatetime)])
def test_invalid_origins(self, origin, exc, units, units_from_epochs):
with pytest.raises(exc):
pd.to_datetime(units_from_epochs, unit=units,
origin=origin)
def test_invalid_origins_tzinfo(self):
# GH16842
with pytest.raises(ValueError):
pd.to_datetime(1, unit='D',
origin=datetime(2000, 1, 1, tzinfo=pytz.utc))
def test_processing_order(self):
# make sure we handle out-of-bounds *before*
# constructing the dates
result = pd.to_datetime(200 * 365, unit='D')
expected = Timestamp('2169-11-13 00:00:00')
assert result == expected
result = pd.to_datetime(200 * 365, unit='D', origin='1870-01-01')
expected = Timestamp('2069-11-13 00:00:00')
assert result == expected
result = pd.to_datetime(300 * 365, unit='D', origin='1870-01-01')
expected = Timestamp('2169-10-20 00:00:00')
assert result == expected
| apache-2.0 |
google-research/social_cascades | news/main_baseline.py | 1 | 4858 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Baselines for classification task."""
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import pandas as pd
import sklearn
from sklearn import metrics
from sklearn import svm
import xgboost as xgb
if not __package__:
import utils # pylint: disable=g-bad-import-order,g-import-not-at-top
import utils_gcs # pylint: disable=g-bad-import-order,g-import-not-at-top
else:
from gnns_for_news import utils # pylint: disable=g-bad-import-order,g-import-not-at-top
from gnns_for_news import utils_gcs # pylint: disable=g-bad-import-order,g-import-not-at-top
FLAGS = flags.FLAGS
flags.DEFINE_string(
'task', 'cat', ('task: sr(subreddit classification), '
'cat(url categorization), fake(fake news detection)'))
flags.DEFINE_string('embedding', 'bert',
'embedding: bert(for post title), graph(for user)')
flags.DEFINE_string('gcs_path_in', None, 'gcs bucket input directory')
flags.DEFINE_string('gcs_path_out', None, 'gcs bucket output directory')
flags.DEFINE_string('local_path', './fake_input/', 'graph csv/gpickle file')
flags.DEFINE_string('g_emb', '', 'graph embedding file')
flags.DEFINE_string('seq_file', '', 'post sequence file')
flags.DEFINE_string('balance_df', '', 'the balanced dataset with url ids')
# Classification parameters
flags.DEFINE_string('model', 'xgboost', 'xgboost, svm')
flags.DEFINE_float('train_ratio', 0.7, 'training data ratio')
flags.DEFINE_integer('epochs', 10, 'number of epochs')
# Flag specifications
flags.mark_flag_as_required('gcs_path_in')
flags.mark_flag_as_required('gcs_path_out')
flags.mark_flag_as_required('g_emb')
flags.mark_flag_as_required('seq_file')
def logging_info_test(test_result):
if len(test_result) == 2:
logging.info(('Test Acc : %.4f | Test F1 : %.4f'), *test_result)
else:
logging.info(('Test Acc %.4f | Test Micro-F1 : %.4f | '
'Test Macro-F1 : %.4f | Test Weighted Macro-F1 : %.4f'),
*test_result)
def evaluate_test(y_true, y_pred):
"""Evaluates test data."""
if len(set(y_true)) == 2:
test_result = [metrics.accuracy_score(y_true, y_pred),
metrics.f1_score(y_true, y_pred)]
else:
test_result = [metrics.accuracy_score(y_true, y_pred),
metrics.f1_score(y_true, y_pred, average='micro'),
metrics.f1_score(y_true, y_pred, average='macro'),
metrics.f1_score(y_true, y_pred, average='weighted')]
logging_info_test(test_result)
return test_result
def main(_):
if not os.path.exists(FLAGS.local_path):
utils_gcs.download_files_from_gcs(FLAGS.local_path, FLAGS.gcs_path_in)
logging.info('Data downloaded successfully!')
sequence_df = pd.read_hdf(
os.path.join(FLAGS.local_path, FLAGS.seq_file), 'df')
if FLAGS.balance_df:
balance_df = pd.read_hdf(
os.path.join(FLAGS.local_path, FLAGS.balance_df), 'df')
sequence_df = sequence_df[sequence_df['url'].isin(balance_df['url'])]
if FLAGS.embedding == 'graph':
embeddings_dict = utils.get_n2v_graph_embedding(
os.path.join(FLAGS.local_path, FLAGS.g_emb), graph_gen=False,
normalize_type='minmax')
x_sequence, y_label, _ = utils.load_input_with_label(
sequence_df, embeddings_dict, FLAGS.task)
elif FLAGS.embedding == 'bert':
x_sequence, y_label, _ = utils.load_bert_input_with_label(
sequence_df, FLAGS.task, pooling='average')
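  # Mean-pool each per-item embedding sequence into a single fixed-size vector
  # so the non-sequential baseline classifiers below can consume it.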
x_averaged = np.array([np.mean(seq, axis=0) for seq in x_sequence])
# model training/testing
logging.info('Classifier : %s', FLAGS.model)
if FLAGS.model == 'svm':
model = svm.SVC()
elif FLAGS.model == 'xgboost':
model = xgb.XGBClassifier()
test_results = []
for epoch in range(FLAGS.epochs):
logging.info('Epoch %s', epoch)
x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(
x_averaged, y_label, train_size=FLAGS.train_ratio)
model.fit(x_train, y_train)
pred_y_test = model.predict(x_test)
test_results.append(evaluate_test(y_test, pred_y_test))
test_results = np.mean(np.array(test_results), axis=0)
logging.info('Average results of %d epochs ', FLAGS.epochs)
logging_info_test(test_results)
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
18padx08/PPTex | JinjaPPExtension.py | 1 | 24565 | #!/usr/bin/python
from os import sys,getcwd
sys.path += ['/usr/texbin','', '//anaconda/lib/python27.zip', '//anaconda/lib/python2.7', '//anaconda/lib/python2.7/plat-darwin', '//anaconda/lib/python2.7/plat-mac', '//anaconda/lib/python2.7/plat-mac/lib-scriptpackages', '//anaconda/lib/python2.7/lib-tk', '//anaconda/lib/python2.7/lib-old', '//anaconda/lib/python2.7/lib-dynload', '//anaconda/lib/python2.7/site-packages', '//anaconda/lib/python2.7/site-packages/PIL', '//anaconda/lib/python2.7/site-packages/setuptools-2.1-py2.7.egg']
from jinja2 import nodes, contextfunction
from jinja2.ext import Extension
from jinja2 import Environment, FileSystemLoader
from jinja2.exceptions import TemplateSyntaxError
import numpy as np
from sympy import Symbol, sympify, lambdify, latex
import sympy as sp
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plot
import subprocess
import re
class PPException(Exception):
pass
class PPExtension(Extension):
tags = set(['figure', 'table', 'calcTable', 'evaluate', 'evaltex'])
    def __init__(self, environment):
super(PPExtension, self).__init__(environment)
#add defaults
environment.extend(error_calculation='gauss', data_mode='tuples', print_figure_for_each_table=True)
def parse(self, parser):
#the token
lineno = parser.stream.next()
linnum = lineno.lineno
if(lineno.value == 'figure'):
            #the argument
arg = [parser.parse_expression()]
#the figure data
if (parser.stream.skip_if('comma')):
arg.append(parser.parse_expression())
body = parser.parse_statements(['name:endfigure'], drop_needle=True)
return nodes.CallBlock(self.call_method('_create_figure', arg),[], [], body).set_lineno(linnum)
elif(lineno.value == 'table'):
arg = [parser.parse_expression()]
return nodes.Output([self.call_method('_print_latex_table', arg)])
elif(lineno.value == 'evaluate'):
arg = [parser.parse_expression()]
return nodes.Output([self.call_method('_evaluate_function', arg)])
elif(lineno.value == 'evaltex'):
arg = [parser.parse_expression()]
return nodes.Output([self.call_method('_evaltex_function', arg)])
elif( lineno.value == 'calcTable'):
arg = [parser.parse_expression()]
return nodes.Output([self.call_method('_calcTable_function', arg)])
#body = parser.parse_statements(['name:endfigure'], drop_needle=True)
return nodes.Const(None)
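    # A hedged usage sketch of the custom tags handled above (argument keys are
    # inferred from the handler methods below; names and values are illustrative):
    #   {% figure 'myplot', {'data': [...], 'xlabel': 'x', 'ylabel': 'y'} %}caption text{% endfigure %}
    #   {% evaltex {'function': 'a*b', 'symbols': [{'sym': 'a', 'val': 2}, {'sym': 'b', 'val': 3}]} %}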
def _getValue(self, r,c,table, regExps):
print('begin getValue')
table[r,c] = str(table[r,c]).replace("??", str(c))
table[r,c] = str(table[r,c]).replace("##", str(r))
print(table)
try:
print("is it a value?", table[r,c])
return np.round(float(table[r,c]), 3)
except(ValueError):
# print("no it's not")
#got string try parse it
#for reg in regExps:
val = self._parseValue(r,c,table[r,c],table, regExps)
# print('finished parsing')
if val is not None: return val
return 0
def _parseValue(self, row, column, entry, table, regExps):
value = 0
print('sp lets try parse it')
for reg,callBack in regExps:
# print('before regex')
temp = reg.finditer(entry)
# print('did regex match?')
cor= 0
if temp:
for match in temp:
# print('try callback')
result = callBack(row, column, entry, table, match, regExps, cor)
# print('I have some result')
cor += result[0]
entry = result[1]
try:
print(entry)
value = eval(entry)
except(Exception):
return str(entry)
return np.round(value,3)
#return str(value)
#callback function for regular expression single value
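    #it returns [offset correction for subsequent matches, entry with the $row,col$ reference replaced]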
def SingleValFound(self, row, column, entry, table, match, regExps, cor):
tup = match.group().replace('$', '')
#print(tup)
r,c = tup.split(',')
r = row if int(r) < 0 else r
c = column if int(c) < 0 else c
#print(r,c)
tmpVal = str(self._getValue(r,c,table, regExps))
#print('tmpVal', tmpVal)
entry = entry[0:match.start()-cor] + tmpVal + entry[match.end()-cor:]
return [len(match.group()) - len(tmpVal), entry]
def _calcTable_function(self, data):
xheader = data['xheader']
yheader = data['yheader']
        #build up the regex for formula references: $1,2$ means second row, third column;
        #$(0:1,0:1)$ means the rectangle  (0,0) - (0,1)
        #                                   |       |
        #                                 (1,0) - (1,1)
        #which yields an array of the matching entries with the same dimensions
        #replace every placeholder with the value, putting 0 if the index is not valid
singleVal = re.compile('\$-?\d*,-?\d*\$')
table = np.array(data['table'])
print table
for row in range(np.shape(table)[0]):
print(row)
for column in range(np.shape(table)[1]):
print ("parse (",row,column,")")
blub = []
blub.append([singleVal, self.SingleValFound])
value = self._getValue(row, column, table, blub)
# print(value)
table[row,column] = value
datArr = {}
print('table construction completed')
datArr['extended'] = True
datArr['xheader'] = xheader
datArr['yheader'] = yheader
datArr['xdata'] = []
if 'group' in data:
datArr['group'] = data['group']
if 'startat' in data:
datArr['startat'] = data['startat']
print('building up data array')
for c in range(np.shape(table)[1]):
#print(c)
datArr['xdata'].append(table[:,c].tolist())
datArr['desc'] = data['desc']
figstr = ''
print('lets create a np array')
bigarray = []
print('lets go')
if 'figure' in data:
print( data['figure'])
for fig in data['figure']:
if not 'function' in fig:
xrow = int(fig['xrow'])
yrow = int(fig['yrow'])
print(xrow, yrow)
print(table[:,xrow])
xdata = table[:,xrow].astype(np.float)
ydata = table[:,yrow].astype(np.float)
#print(xdata, ydata)
xmin = np.min(xdata)
#print(xmin)
xmax = np.max(xdata)
ymin = np.min(ydata)
ymax = np.max(ydata)
if 'xmin' and 'xmax' and 'ymin' and 'ymax' in fig['range']:
xmax = fig['range']['xmax']
xmin = fig['range']['xmin']
ymax = fig['range']['ymax']
ymin = fig['range']['ymin']
#print(xmin,xmax,ymin,ymax)
rang = [xmin, xmax, ymin, ymax]
# print (rang)
title = fig['title']
desc = data['desc']
ylabel = fig['ylabel']
xlabel = fig['xlabel']
ref = fig['ref']
figureArray = {}
figureArray['xdata'] = xdata.tolist()
figureArray['ydata'] = ydata.tolist()
figureArray['title'] = title
figureArray['desc'] = desc
figureArray['range'] = rang
if 'interpolate' in fig:
figureArray['dim'] = fig['dim']
figureArray['interpolate'] = fig['interpolate']
if 'slope' in fig:
figureArray['slope'] = fig['slope']
print('try creating figure', figureArray)
else:
title = fig['title']
desc = data['desc']
ylabel = fig['ylabel']
xlabel = fig['xlabel']
ref = fig['ref']
xmin = fig['xmin']
xmax = fig['xmax']
try:
f = sp.lambdify("x",sp.sympify(fig['function']),'numpy')
except(Exception):
raise TemplateSyntaxError("Could not parse function '" + fig['function'] + "'", 100);
try:
print(f)
ymin = np.min(f(np.linspace(xmin,xmax,1000)))
ymax = np.max(f(np.linspace(xmin,xmax,1000)))
rang = [xmin, xmax, ymin, ymax]
except(Exception):
raise TemplateSyntaxError("Could not evaluate function", 100)
figureArray = {}
figureArray['title'] = title
figureArray['desc'] = desc
figureArray['range'] = rang
figureArray['function'] = fig['function']
bigarray.append(figureArray)
indices = []
print(bigarray)
bigarray = np.array(bigarray)
print(bigarray)
if 'combineFigures' in data:
for group in data['combineFigures']:
print('\n\n\n mygroup\n\n\n',bigarray[group])
figstr += self._create_figure(ref, {'data': bigarray[group], 'ylabel':ylabel, 'xlabel':xlabel}, fig['caption'])
indices = indices + group
loopcounter = 0
for f in bigarray:
if loopcounter in indices:
print('already printed')
loopcounter +=1
continue
print("before function create figure", figstr)
try:
figstr += self._create_figure(ref, {'data': [f], 'ylabel':ylabel, 'xlabel':xlabel}, fig['caption'])
except(Exception):
raise TemplateSyntaxError("function call was invalid", 100)
print("I created a figure")
loopcounter +=1
print('try printing the table')
print(figstr)
return self._print_latex_table(datArr) + figstr
def _evaltex_function(self, data):
try:
s = sympify(data['function'])
except:
raise TemplateSyntaxError("could not parse formula", 01)
try:
l = latex(s)
s = s.doit()
except:
raise TemplateSyntaxError("could either not make latex output or simpify", 1)
l2 = None
        #the errors branch below ignores the step option
if 'step' in data:
l2 = latex(s)
#print(latex(s))
vals = []
syms = []
indep = []
unindep = []
try:
# print(data['symbols'])
for symbol in data['symbols']:
# print(symbol)
# print(symbol['sym'], symbol['val'])
syms.append(Symbol(symbol['sym']))
vals.append(symbol['val'])
if 'indep' in symbol:
indep.append([syms[-1], symbol['uncert'], vals[-1]])
else:
unindep.append([syms[-1], vals[-1]])
except:
raise TemplateSyntaxError("something went wrong parsing symbols", 100)
#print(syms, vals)
# print(syms, vals, indep, s)
try:
my_function = lambdify(syms, s, 'numpy')
result = my_function(*vals)
#print("check if error is set", result)
if 'errors' in data:
            #start looping through all variables in an extra array
error_terms = 0
partial_terms = []
partial_terms_squared = []
uncerts = []
# print(l + " = " + str(result))
try:
for ind in indep:
#loop through variables
d = Symbol('s_' + ind[0].name)
partial = sp.diff(s, ind[0]) * d/s
partial_terms.append(partial)
partial_terms_squared.append(partial**2)
error_terms = error_terms + partial**2
uncerts.append([d, str(ind[1])])
except:
raise TemplateSyntaxError("error on building up error_terms", 15)
#make substitutions
# print("begin substitution", error_terms)
error_terms = error_terms**0.5
ptsv1 = []
try:
for pt in partial_terms_squared:
ptsv = pt
# print("substitution started" )
#substitue first all dependend variables
for ind in indep:
# print(ind)
try:
ptsv = ptsv.subs(ind[0], ind[-1])
ptsv = ptsv.subs('s_' + ind[0].name, ind[1])
except:
raise TemplateSyntaxError("Could not substitued dependend var", 100)
for unind in unindep:
# print(unind)
try:
ptsv = ptsv.subs(unind[0], unind[1])
except:
raise TemplateSyntaxError("Could not substitued undependend var", 100)
ptsv1.append(ptsv)
except:
raise TemplateSyntaxError("the substitution failed for error calculation", 10)
#error
uval = sp.sqrt(sum(ptsv1))
rresult = np.round(result, data['digits'] if 'digits' in data else 5)
#print(rresult)
#print(uval)
error = (uval * result).round(data['digits'] if 'digits' in data else 5)
#print(rresult, error)
return """\\(""" + (data['fname'] if 'fname' in data else "f") + """ = """ + l + """ = """ + str(rresult) + """ \pm """ + str(abs(error)) + (data['units'] if 'units' in data else "") + """\\)
Error is calculated according to standard error propagation:
\\begin{dmath}
s_{""" + (data['fname'] if 'fname' in data else "f") +"""} = """ + latex(error_terms) + """ = """ + str(abs(error.round(data['digits'] if 'digits' in data else 5))) +(data['units'] if 'units' in data else "" )+ """
\\end{dmath}
            with uncertainties: \\(""" + ",".join([latex(cert[0]) + ' = ' + cert[1] for cert in uncerts]) +"""\\)
"""
#print(result)
except:
raise TemplateSyntaxError("could not evaluate formula", 100)
try:
if 'supRes' in data:
return l
elif 'step' in data:
return l + " = " + l2 + " = " + str(result)
return l + " = " + str(result)
except:
raise TemplateSyntaxError("Malformed result...", 100)
# dictionary with entries
# data
# |_ [xdata, ydata, desc]
# xlabel
# ylabel
def _print_latex_table(self, data):
if 'extended' in data:
#we have in xdata an array and there is an array xheader and yheader (optional otherwise same as xheader) where xheader matches size of xdata and yheader matches size of one entry array of xdata
#at least one entry
#print("latex print function", data)
ylen = len(data['xdata'][0])
#since len(xheader) and len (xdata) should match we take xheader
xlen = len(data['xheader'])
#the xheader string (since latex builds tables per row)
yheader = data['yheader'] if 'yheader' in data else []
xheader = "&" if len(yheader) >0 else ""
print(data['xheader'], yheader)
#xheader += "&".join(data['xheader'])
isfirst = True
for h in data['xheader']:
if isfirst:
xheader += "\\textbf{" + str(h) + "}"
isfirst = False
else:
xheader += "&\\textbf{" + str(h) + "}"
table = '\\begin{table}\\centering\\begin{tabular}{' + 'c' * (xlen+ (1 if len(yheader) > 0 else 0)) +'}'
#table += "\\hline\n"
table += xheader + "\\\\\n\\cline{2-" + str(xlen+1) + "}"
#first = True
#now iterate over all rows, remember to print in the first row the yheader if there is one
for i in xrange(0, ylen):
first = True
if(len(yheader) > 0):
try:
table += "\\multicolumn{1}{r|}{\\textbf{" + str(data['yheader'][i]) + "}}"
except:
if i > len(data['yheader'])-1:
print("dimension of yheader is wrong")
print("ooooops there is an error in yheader")
raise TemplateSyntaxError("Yheader is wrong: probably inconsistencies in dimension", i)
for o in xrange(0,xlen):
try:
grouping = -1
startat = 0
if 'group' in data:
grouping = data['group']
if 'startat' in data:
startat = data['startat']
if len(yheader) >0:
if o == xlen-1:
table += "&\multicolumn{1}{c|}{" + str(data['xdata'][o][i]) + "}"
elif grouping > 0 and (o - startat) % grouping == 0:
table += "&\multicolumn{1}{|c}{" + str(data['xdata'][o][i]) + "}"
else:
print(data['xdata'][o][i])
table += "&" + str(data['xdata'][o][i])
else:
if not first:
table += "&"
first = False
if o == xlen-1:
table += "\multicolumn{1}{c|}{" + str(data['xdata'][o][i]) + "}"
else:
#print(data['xdata'][o][i])
table += str(data['xdata'][o][i])
except:
print("some error at: ", o, i)
raise TemplateSyntaxError("some error while parsing table data: ("+str(o)+","+str(i)+")" , o)
#raise PPException("Error while parsing datapoints, probably missing an entry; check dimensions")
#print(table)
table += "\\\\\\cline{2-" + str(xlen+1) + "}\n"
print(data['desc'])
table += "\\end{tabular} \\caption{" + str(data['desc']) + "} \\end{table}\n"
#print (table)
else:
for tab in data['data']:
table = "\\begin{figure}\\centering\\begin{tabular}{|c|c|}"
i = 0
table += "\\hline\n"
table += str(data['xlabel']) + " & " + str(data['ylabel']) + "\\\\\n"
table += "\\hline\n"
for entry in tab['xdata']:
table += str(entry) + " & " + str(tab['ydata'][i]) + "\\\\\n"
table += "\\hline\n"
i+=1
table += "\\end{tabular} \\caption{" + str(tab['desc']) + "} \\end{figure}\n"
print('oke returning')
return table
# dictionary with entries
# data
# |_ (xdata,ydata,range = [xmin,xmax,ymin,ymax], title, interpolate)
# ylabel
# xlabel
def _create_figure(self, title, data, caller):
print("test")
plot.figure()
slopeinter = ''
#foreach data set in data print a figure
i = 0;o=0; colors=["blue", "green", "#ffaca0"]
for fig in data['data']:
print("datacount",len(data['data']))
if 'range' in fig and len(data['data']) <=1:
plot.axis(fig['range'])
if 'interpolate' in fig :
f = np.polyfit(fig['xdata'],fig['ydata'], fig['dim'] if 'dim' in fig else 1)
print("slope-intercept",f[0])
if 'slope' in fig:
slopeinter = "y = "+str(np.round(f[0], 3)) + "x + " + str(np.round(f[1],3))
#plot.annotate("y = " + f[0]+"*x + "+ f[1], xy=(1,1), xytext=(1,1.5), arrowprops=dict(facecolor='black', shrink=0.05),)
f_n = np.poly1d(f)
xnew = np.linspace(fig['range'][0], fig['range'][1], 10000)
plot.plot(xnew, f_n(xnew), label = slopeinter, color=colors[i % (len(colors))])
i += 1
if 'function' in fig:
try:
f = sp.lambdify("x",sp.sympify(fig['function']), "numpy")
except(Exception):
raise TemplateSyntaxError("Could not lambdify function in createFigure", 100)
print(f(2))
try:
datArr = []
xes = np.linspace(fig['range'][0], fig['range'][1], 1000)
for x in xes:
datArr += [f(x)]
print(len(datArr), len(xes))
if 'inverse' in fig:
# plot.plot(datArr,xes, label=fig['title'], color=colors[o % (len(colors))])
pass
else:
plot.plot(xes,datArr, label=fig['title'], color=colors[o % (len(colors))])
except(Exception):
raise TemplateSyntaxError("Could not plot figure", 100)
else:
plot.plot(fig['xdata'], fig['ydata'], label=fig['title'], linestyle="none", color=colors[o % (len(colors))], marker="s", markersize=7)
plot.legend()
o+=1
plot.ylabel(data['ylabel'])
plot.xlabel(data['xlabel'])
file = plot.savefig(title.replace(" ","")+".png")
#print file
return u"""
\\begin{figure}[ht!]
\centering
\includegraphics[width=\\textwidth]{""" + title.replace(" ","") + """.png}
\\caption{"""+(caller().strip() if type(caller) is not str else caller.strip())+u""" \\label{fig:""" + title + """}}
\\end{figure}\n"""
#return nodes.
#dictionary for evaluating functions
# variables - [] (use default values as initialization of data)
# function - str
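    #   e.g. data = {'variables': ['x=1', 'y=2'], 'function': 'x + y'}  (illustrative values only)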
def _evaluate_function(self, data):
funcstr = """\
def evalFunc(""" + ",".join(data['variables']) +"""):
return {e}""".format(e=data['function'])
exec(funcstr)
return evalFunc()
env = Environment(extensions=[PPExtension], loader=FileSystemLoader('.'))
t=None
file=None
if(len(sys.argv) >= 3):
t = env.get_template(sys.argv[2])
file = sys.argv[2]
elif(len(sys.argv) == 2):
t = env.get_template(sys.argv[1])
file = sys.argv[1]
elif(len(sys.argv) <=1):
file = str(raw_input("Template File("+getcwd()+"): "))
t = env.get_template(file)
f = open(file + "tmp", 'w')
f.write(t.render())
cmdstr = "xelatex -interaction=nonstopmode " + (sys.argv[1] if len(sys.argv) >=3 else "") + " " + f.name
f.close()
print subprocess.Popen( cmdstr, shell=True, stdout=subprocess.PIPE ).stdout.read()
print subprocess.Popen( cmdstr, shell=True, stdout=subprocess.PIPE ).stdout.read()
print subprocess.Popen( cmdstr, shell=True, stdout=subprocess.PIPE ).stdout.read()
#os.system("open " + os.path.splitext(os.path.basename(sys.argv[1]))[0] + ".pdf")
| mit |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/container.py | 1 | 3396 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import matplotlib.cbook as cbook
class Container(tuple):
"""
Base class for containers.
"""
def __repr__(self):
return "<Container object of %d artists>" % (len(self))
def __new__(cls, *kl, **kwargs):
return tuple.__new__(cls, kl[0])
def __init__(self, kl, label=None):
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
self._remove_method = None
self.set_label(label)
def set_remove_method(self, f):
self._remove_method = f
def remove(self):
for c in self:
c.remove()
if self._remove_method:
self._remove_method(self)
def __getstate__(self):
d = self.__dict__.copy()
# remove the unpicklable remove method, this will get re-added on load
# (by the axes) if the artist lives on an axes.
d['_remove_method'] = None
return d
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: string or anything printable with '%s' conversion.
"""
if s is not None:
self._label = '%s' % (s, )
else:
self._label = None
self.pchanged()
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
For adding callbacks
"""
try:
del self._propobservers[oid]
except KeyError:
pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in list(six.iteritems(self._propobservers)):
func(self)
def get_children(self):
return list(cbook.flatten(self))
class BarContainer(Container):
def __init__(self, patches, errorbar=None, **kwargs):
self.patches = patches
self.errorbar = errorbar
Container.__init__(self, patches, **kwargs)
class ErrorbarContainer(Container):
def __init__(self, lines, has_xerr=False, has_yerr=False, **kwargs):
self.lines = lines
self.has_xerr = has_xerr
self.has_yerr = has_yerr
Container.__init__(self, lines, **kwargs)
class StemContainer(Container):
def __init__(self, markerline_stemlines_baseline, **kwargs):
markerline, stemlines, baseline = markerline_stemlines_baseline
self.markerline = markerline
self.stemlines = stemlines
self.baseline = baseline
Container.__init__(self, markerline_stemlines_baseline, **kwargs)
| mit |
cdawei/shogun | examples/undocumented/python_static/graphical/util.py | 22 | 1225 | """ Utilities for matplotlib examples """
import pylab
from numpy import ones, array, meshgrid, linspace, concatenate, ravel, min, max
from numpy.random import randn
QUITKEY='q'
NUM_EXAMPLES=200
DISTANCE=2
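# Toy data: two 2-D Gaussian blobs shifted to +DISTANCE and -DISTANCE.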
def quit (event):
if event.key==QUITKEY or event.key==QUITKEY.upper():
pylab.close()
def set_title (title):
quitmsg=" (press '"+QUITKEY+"' to quit)"
complete=title+quitmsg
manager=pylab.get_current_fig_manager()
# now we have to wrap the toolkit
if hasattr(manager, 'window'):
if hasattr(manager.window, 'setCaption'): # QT
manager.window.setCaption(complete)
if hasattr(manager.window, 'set_title'): # GTK
manager.window.set_title(complete)
elif hasattr(manager.window, 'title'): # TK
manager.window.title(complete)
def get_traindata():
return concatenate(
(randn(2, NUM_EXAMPLES)+DISTANCE, randn(2, NUM_EXAMPLES)-DISTANCE),
axis=1)
def get_meshgrid(traindata):
x1=linspace(1.2*min(traindata), 1.2*max(traindata), 50)
x2=linspace(1.2*min(traindata), 1.2*max(traindata), 50)
return meshgrid(x1,x2)
def get_testdata(x, y):
return array((ravel(x), ravel(y)))
def get_labels(raw=False):
return concatenate(
(-ones([1, NUM_EXAMPLES]), ones([1, NUM_EXAMPLES])),
axis=1)[0]
| gpl-3.0 |
ChristosChristofidis/bokeh | examples/compat/mpl/listcollection.py | 13 | 1573 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from bokeh import mpl
from bokeh.plotting import show
def make_segments(x, y):
'''
Create list of line segments from x and y coordinates.
'''
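    # Pair each point with its successor: the result has shape (n - 1, 2, 2),
    # i.e. one (start, end) coordinate pair per segment, as LineCollection expects.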
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
def colorline(x, y, colors=None, linewidth=3, alpha=1.0):
'''
Plot a line with segments.
    Optionally, specify segment colors and segment widths.
'''
# Make a list of colors cycling through the rgbcmyk series.
# You have several ways to input the colors:
# colors = ['r','g','b','c','y','m','k']
# colors = ['red','green','blue','cyan','yellow','magenta','black']
# colors = ['#ff0000', '#008000', '#0000ff', '#00bfbf', '#bfbf00', '#bf00bf', '#000000']
# colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0), (0.0, 0.75, 0.75, 1.0),
# (0.75, 0.75, 0, 1.0), (0.75, 0, 0.75, 1.0), (0.0, 0.0, 0.0, 1.0)]
colors = ['r', 'g', 'b', 'c', 'y', 'm', 'k']
widths = [5, 10, 20, 40, 20, 10, 5]
segments = make_segments(x, y)
lc = LineCollection(segments, colors=colors, linewidth=widths, alpha=alpha)
ax = plt.gca()
ax.add_collection(lc)
return lc
# Colored sine wave
x = np.linspace(0, 4 * np.pi, 100)
y = np.sin(x)
colorline(x, y)
plt.title("MPL support for ListCollection in Bokeh")
plt.xlim(x.min(), x.max())
plt.ylim(-1.0, 1.0)
show(mpl.to_bokeh(name="listcollection"))
| bsd-3-clause |
benoitsteiner/tensorflow-opencl | tensorflow/examples/learn/hdf5_classification.py | 75 | 2899 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, hdf5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # Note that we are saving and loading the iris data in h5 format as a simple
# demonstration here.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
mesnardo/PetIBM | examples/ibpm/cylinder2dRe3000_GPU/scripts/plotVorticity.py | 3 | 1384 | """
Computes, plots, and saves the 2D vorticity field from a PetIBM simulation
after 3000 time steps (3 non-dimensional time-units).
"""
import pathlib
import h5py
import numpy
from matplotlib import pyplot
simu_dir = pathlib.Path(__file__).absolute().parents[1]
# Read vorticity field and its grid from files.
name = 'wz'
filepath = simu_dir / 'grid.h5'
f = h5py.File(filepath, 'r')
x, y = f[name]['x'][:], f[name]['y'][:]
X, Y = numpy.meshgrid(x, y)
timestep = 3000
filepath = simu_dir / 'solution' / '{:0>7}.h5'.format(timestep)
f = h5py.File(filepath, 'r')
wz = f[name][:]
# Read body coordinates from file.
filepath = simu_dir / 'circle.body'
with open(filepath, 'r') as infile:
xb, yb = numpy.loadtxt(infile, dtype=numpy.float64,
unpack=True, skiprows=1)
pyplot.rc('font', family='serif', size=16)
# Plot the filled contour of the vorticity.
fig, ax = pyplot.subplots(figsize=(6.0, 6.0))
ax.grid()
ax.set_xlabel('x')
ax.set_ylabel('y')
levels = numpy.linspace(-56.0, 56.0, 28)
ax.contour(X, Y, wz, levels=levels, colors='black')
ax.plot(xb, yb, color='red')
ax.set_xlim(-0.6, 1.6)
ax.set_ylim(-0.8, 0.8)
ax.set_aspect('equal')
fig.tight_layout()
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'wz{:0>7}.png'.format(timestep)
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
MaxHalford/arcgonaut | examples/data/airports.py | 1 | 1120 | import pandas as pd
airportNames = ['id', 'name', 'city', 'country', 'IATA', 'ICAO',
'lat', 'lon', 'altitude', 'timezone', 'DST', 'tz']
routeNames = ['airline', 'airlineID', 'source', 'sourceID',
'destination', 'destinationID', 'codeshare', 'stops',
'equipment']
base = 'https://raw.githubusercontent.com/jpatokal/openflights/master/data/'
airports = pd.read_csv(base + 'airports.dat', names=airportNames)
routes = pd.read_csv(base + 'routes.dat', names=routeNames)
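# Join each route to its source airport (on IATA) and then to its destination
# airport, keeping only the source/destination country columns.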
outwards = pd.merge(left=pd.merge(left=airports, right=routes, left_on='IATA',
right_on='source'), right=airports, left_on='destination',
right_on='IATA')[['country_x', 'country_y']]
outwards.columns = ('source', 'destination')
outwards['both'] = outwards['source'] + '_' + outwards['destination']
with open('airports.arcgo', 'w') as arcgo:
for countries, count in outwards.groupby('both').count().iterrows():
a, b = countries.split('_')
amount = count['destination']
if a != b:
arcgo.write('{0}>{1}>{2}\n'.format(a, b, amount))
| mit |
anntzer/scikit-learn | sklearn/feature_selection/_univariate_selection.py | 8 | 29031 | """Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import safe_sparse_dot, row_norms
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
from ._base import SelectorMixin
def _clean_nans(scores):
"""
Fixes Issue #1240: NaNs can't be properly compared, so change them to the
smallest value of scores's dtype. -inf seems to be unreliable.
"""
# XXX where should this function be called? fit? scoring functions
# themselves?
scores = as_float_array(scores, copy=True)
scores[np.isnan(scores)] = np.finfo(scores.dtype).min
return scores
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
"""Performs a 1-way ANOVA.
The one-way ANOVA tests the null hypothesis that 2 or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
*args : array-like, sparse matrices
sample1, sample2... The sample measurements should be given as
arguments.
Returns
-------
    F-value : array, shape = [n_features,]
        The computed F-value of the test for each feature.
    p-value : array, shape = [n_features,]
        The associated p-value from the F-distribution for each feature.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent
2. Each sample is from a normally distributed population
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
See ``scipy.stats.f_oneway`` that should give the same results while
being less efficient.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 14.
http://faculty.vassar.edu/lowry/ch14pt1.html
.. [2] Heiman, G.W. Research Methods in Statistics. 2002.
"""
n_classes = len(args)
args = [as_float_array(a) for a in args]
n_samples_per_class = np.array([a.shape[0] for a in args])
n_samples = np.sum(n_samples_per_class)
ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
sums_args = [np.asarray(a.sum(axis=0)) for a in args]
square_of_sums_alldata = sum(sums_args) ** 2
square_of_sums_args = [s ** 2 for s in sums_args]
sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
ssbn = 0.
for k, _ in enumerate(args):
ssbn += square_of_sums_args[k] / n_samples_per_class[k]
ssbn -= square_of_sums_alldata / float(n_samples)
sswn = sstot - ssbn
dfbn = n_classes - 1
dfwn = n_samples - n_classes
msb = ssbn / float(dfbn)
msw = sswn / float(dfwn)
constant_features_idx = np.where(msw == 0.)[0]
if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
warnings.warn("Features %s are constant." % constant_features_idx,
UserWarning)
f = msb / msw
# flatten matrix to vector in sparse case
f = np.asarray(f).ravel()
prob = special.fdtrc(dfbn, dfwn, f)
return f, prob
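# Minimal usage sketch (illustrative only; the helper name, random data and
# class shift below are assumptions, not part of this module).  f_oneway works
# column-wise, so each argument is a (n_samples_in_class, n_features) array.
def _f_oneway_usage_sketch():
    rng = np.random.RandomState(0)
    class_a = rng.normal(loc=0.0, size=(20, 3))   # 20 samples, 3 features
    class_b = rng.normal(loc=1.0, size=(20, 3))   # shifted mean -> larger F
    f_stat, p_val = f_oneway(class_a, class_b)
    return f_stat, p_val                          # both arrays of length 3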
def f_classif(X, y):
"""Compute the ANOVA F-value for the provided sample.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = [n_samples, n_features]
The set of regressors that will be tested sequentially.
    y : array of shape (n_samples,)
        The target vector (class labels).
Returns
-------
F : array, shape = [n_features,]
The set of F values.
pval : array, shape = [n_features,]
The set of p-values.
See Also
--------
chi2 : Chi-squared stats of non-negative features for classification tasks.
f_regression : F-value between label/feature for regression tasks.
"""
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'])
args = [X[safe_mask(X, y == k)] for k in np.unique(y)]
return f_oneway(*args)
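# Minimal usage sketch (illustrative only; make_classification and its
# parameter values are assumptions chosen for the example).
def _f_classif_usage_sketch():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=8, n_informative=3,
                               random_state=0)
    F, pval = f_classif(X, y)
    # Informative columns should tend to get larger F values / smaller p-values.
    return F, pval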
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
with np.errstate(invalid="ignore"):
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
"""Compute chi-squared stats between each non-negative feature and class.
This score can be used to select the n_features features with the
highest values for the test chi-squared statistic from X, which must
contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
Recall that the chi-square test measures dependence between stochastic
variables, so using this function "weeds out" the features that are the
most likely to be independent of class and therefore irrelevant for
classification.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Sample vectors.
y : array-like of shape (n_samples,)
Target vector (class labels).
Returns
-------
chi2 : array, shape = (n_features,)
chi2 statistics of each feature.
pval : array, shape = (n_features,)
p-values of each feature.
Notes
-----
Complexity of this algorithm is O(n_classes * n_features).
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
f_regression : F-value between label/feature for regression tasks.
"""
# XXX: we might want to do some of the following in logspace instead for
# numerical stability.
X = check_array(X, accept_sparse='csr')
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative.")
Y = LabelBinarizer().fit_transform(y)
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
observed = safe_sparse_dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _chisquare(observed, expected)
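# Minimal usage sketch (illustrative only; the tiny count matrix is made up).
# chi2 expects non-negative features such as term counts.
def _chi2_usage_sketch():
    X = np.array([[4, 0, 1],
                  [3, 0, 2],
                  [0, 5, 0],
                  [0, 4, 1]], dtype=np.float64)
    y = np.array([0, 0, 1, 1])
    chi2_stats, pvals = chi2(X, y)
    # Columns 0 and 1 vary strongly with the class and get the largest statistics.
    return chi2_stats, pvals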
@_deprecate_positional_args
def f_regression(X, y, *, center=True):
"""Univariate linear regression tests.
Linear model for testing the individual effect of each of many regressors.
This is a scoring function to be used in a feature selection procedure, not
a free standing feature selection procedure.
This is done in 2 steps:
1. The correlation between each regressor and the target is computed,
that is, ((X[:, i] - mean(X[:, i])) * (y - mean_y)) / (std(X[:, i]) *
std(y)).
2. It is converted to an F score then to a p-value.
For more on usage see the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
X : {array-like, sparse matrix} shape = (n_samples, n_features)
The set of regressors that will be tested sequentially.
    y : array of shape (n_samples,)
        The target vector.
center : bool, default=True
If true, X and y will be centered.
Returns
-------
F : array, shape=(n_features,)
F values of features.
pval : array, shape=(n_features,)
p-values of F-scores.
See Also
--------
mutual_info_regression : Mutual information for a continuous target.
f_classif : ANOVA F-value between label/feature for classification tasks.
chi2 : Chi-squared stats of non-negative features for classification tasks.
SelectKBest : Select features based on the k highest scores.
SelectFpr : Select features based on a false positive rate test.
SelectFdr : Select features based on an estimated false discovery rate.
SelectFwe : Select features based on family-wise error rate.
SelectPercentile : Select features based on percentile of the highest
scores.
"""
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
n_samples = X.shape[0]
# compute centered values
# note that E[(x - mean(x))*(y - mean(y))] = E[x*(y - mean(y))], so we
# need not center X
if center:
y = y - np.mean(y)
if issparse(X):
X_means = X.mean(axis=0).getA1()
else:
X_means = X.mean(axis=0)
# compute the scaled standard deviations via moments
X_norms = np.sqrt(row_norms(X.T, squared=True) -
n_samples * X_means ** 2)
else:
X_norms = row_norms(X.T)
# compute the correlation
corr = safe_sparse_dot(y, X)
corr /= X_norms
corr /= np.linalg.norm(y)
# convert to p-value
degrees_of_freedom = y.size - (2 if center else 1)
F = corr ** 2 / (1 - corr ** 2) * degrees_of_freedom
pv = stats.f.sf(F, 1, degrees_of_freedom)
return F, pv
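# Minimal usage sketch (illustrative only; make_regression and its parameter
# values are assumptions chosen for the example).
def _f_regression_usage_sketch():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=200, n_features=5, n_informative=2,
                           noise=10.0, random_state=0)
    F, pval = f_regression(X, y)
    # The two informative columns should dominate the F statistics.
    return F, pval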
######################################################################
# Base classes
class _BaseFilter(SelectorMixin, BaseEstimator):
"""Initialize the univariate feature selection.
Parameters
----------
score_func : callable
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
"""
def __init__(self, score_func):
self.score_func = score_func
def fit(self, X, y):
"""Run score function on (X, y) and get the appropriate features.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
"""
X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc'],
multi_output=True)
if not callable(self.score_func):
raise TypeError("The score function should be a callable, %s (%s) "
"was passed."
% (self.score_func, type(self.score_func)))
self._check_params(X, y)
score_func_ret = self.score_func(X, y)
if isinstance(score_func_ret, (list, tuple)):
self.scores_, self.pvalues_ = score_func_ret
self.pvalues_ = np.asarray(self.pvalues_)
else:
self.scores_ = score_func_ret
self.pvalues_ = None
self.scores_ = np.asarray(self.scores_)
return self
def _check_params(self, X, y):
pass
def _more_tags(self):
return {'requires_y': True}
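# Minimal sketch (illustrative only) of the score_func contract documented
# above: the callable may return just a score array, in which case ``pvalues_``
# ends up as None.  The variance-based scorer and the data are assumptions.
def _custom_score_func_sketch():
    from sklearn.datasets import make_classification
    from sklearn.feature_selection import SelectKBest

    def variance_score(X, y):                    # deliberately ignores y
        return np.asarray(X).var(axis=0)

    X, y = make_classification(n_samples=100, n_features=6, random_state=0)
    selector = SelectKBest(score_func=variance_score, k=3).fit(X, y)
    return selector.scores_, selector.pvalues_   # pvalues_ is None here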
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
"""Select features according to a percentile of the highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable, default=f_classif
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See Also"). The default function only
works with classification tasks.
.. versionadded:: 0.18
percentile : int, default=10
Percent of features to keep.
Attributes
----------
scores_ : array-like of shape (n_features,)
Scores of features.
pvalues_ : array-like of shape (n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.feature_selection import SelectPercentile, chi2
>>> X, y = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> X_new = SelectPercentile(chi2, percentile=10).fit_transform(X, y)
>>> X_new.shape
(1797, 7)
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
mutual_info_classif : Mutual information for a discrete target.
chi2 : Chi-squared stats of non-negative features for classification tasks.
f_regression : F-value between label/feature for regression tasks.
mutual_info_regression : Mutual information for a continuous target.
SelectKBest : Select features based on the k highest scores.
SelectFpr : Select features based on a false positive rate test.
SelectFdr : Select features based on an estimated false discovery rate.
SelectFwe : Select features based on family-wise error rate.
GenericUnivariateSelect : Univariate feature selector with configurable
mode.
"""
@_deprecate_positional_args
def __init__(self, score_func=f_classif, *, percentile=10):
super().__init__(score_func=score_func)
self.percentile = percentile
def _check_params(self, X, y):
if not 0 <= self.percentile <= 100:
raise ValueError("percentile should be >=0, <=100; got %r"
% self.percentile)
def _get_support_mask(self):
check_is_fitted(self)
# Cater for NaNs
if self.percentile == 100:
return np.ones(len(self.scores_), dtype=bool)
elif self.percentile == 0:
return np.zeros(len(self.scores_), dtype=bool)
scores = _clean_nans(self.scores_)
threshold = np.percentile(scores, 100 - self.percentile)
mask = scores > threshold
ties = np.where(scores == threshold)[0]
if len(ties):
max_feats = int(len(scores) * self.percentile / 100)
kept_ties = ties[:max_feats - mask.sum()]
mask[kept_ties] = True
return mask
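# Minimal sketch (illustrative only; the data and percentile value are
# assumptions) of the thresholding above: with 8 features and percentile=25,
# at most int(8 * 25 / 100) == 2 features survive, ties at the threshold
# included.
def _select_percentile_sketch():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=100, n_features=8, random_state=0)
    X_new = SelectPercentile(f_classif, percentile=25).fit_transform(X, y)
    return X_new.shape   # expected (100, 2)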
class SelectKBest(_BaseFilter):
"""Select features according to the k highest scores.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable, default=f_classif
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues) or a single array with scores.
Default is f_classif (see below "See Also"). The default function only
works with classification tasks.
.. versionadded:: 0.18
k : int or "all", default=10
Number of top features to select.
The "all" option bypasses selection, for use in a parameter search.
Attributes
----------
scores_ : array-like of shape (n_features,)
Scores of features.
pvalues_ : array-like of shape (n_features,)
p-values of feature scores, None if `score_func` returned only scores.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> X, y = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> X_new = SelectKBest(chi2, k=20).fit_transform(X, y)
>>> X_new.shape
(1797, 20)
Notes
-----
Ties between features with equal scores will be broken in an unspecified
way.
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
mutual_info_classif : Mutual information for a discrete target.
chi2 : Chi-squared stats of non-negative features for classification tasks.
f_regression : F-value between label/feature for regression tasks.
mutual_info_regression : Mutual information for a continuous target.
SelectPercentile : Select features based on percentile of the highest
scores.
SelectFpr : Select features based on a false positive rate test.
SelectFdr : Select features based on an estimated false discovery rate.
SelectFwe : Select features based on family-wise error rate.
GenericUnivariateSelect : Univariate feature selector with configurable
mode.
"""
@_deprecate_positional_args
def __init__(self, score_func=f_classif, *, k=10):
super().__init__(score_func=score_func)
self.k = k
def _check_params(self, X, y):
if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
raise ValueError("k should be >=0, <= n_features = %d; got %r. "
"Use k='all' to return all features."
% (X.shape[1], self.k))
def _get_support_mask(self):
check_is_fitted(self)
if self.k == 'all':
return np.ones(self.scores_.shape, dtype=bool)
elif self.k == 0:
return np.zeros(self.scores_.shape, dtype=bool)
else:
scores = _clean_nans(self.scores_)
mask = np.zeros(scores.shape, dtype=bool)
# Request a stable sort. Mergesort takes more memory (~40MB per
# megafeature on x86-64).
mask[np.argsort(scores, kind="mergesort")[-self.k:]] = 1
return mask
class SelectFpr(_BaseFilter):
"""Filter: Select the pvalues below alpha based on a FPR test.
FPR test stands for False Positive Rate test. It controls the total
amount of false detections.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable, default=f_classif
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See Also"). The default function only
works with classification tasks.
alpha : float, default=5e-2
The highest p-value for features to be kept.
Attributes
----------
scores_ : array-like of shape (n_features,)
Scores of features.
pvalues_ : array-like of shape (n_features,)
p-values of feature scores.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.feature_selection import SelectFpr, chi2
>>> X, y = load_breast_cancer(return_X_y=True)
>>> X.shape
(569, 30)
>>> X_new = SelectFpr(chi2, alpha=0.01).fit_transform(X, y)
>>> X_new.shape
(569, 16)
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
chi2 : Chi-squared stats of non-negative features for classification tasks.
mutual_info_classif: Mutual information for a discrete target.
f_regression : F-value between label/feature for regression tasks.
mutual_info_regression : Mutual information for a continuous target.
SelectPercentile : Select features based on percentile of the highest
scores.
SelectKBest : Select features based on the k highest scores.
SelectFdr : Select features based on an estimated false discovery rate.
SelectFwe : Select features based on family-wise error rate.
GenericUnivariateSelect : Univariate feature selector with configurable
mode.
"""
@_deprecate_positional_args
def __init__(self, score_func=f_classif, *, alpha=5e-2):
super().__init__(score_func=score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self)
return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
"""Filter: Select the p-values for an estimated false discovery rate
This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
on the expected false discovery rate.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable, default=f_classif
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See Also"). The default function only
works with classification tasks.
alpha : float, default=5e-2
The highest uncorrected p-value for features to keep.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.feature_selection import SelectFdr, chi2
>>> X, y = load_breast_cancer(return_X_y=True)
>>> X.shape
(569, 30)
>>> X_new = SelectFdr(chi2, alpha=0.01).fit_transform(X, y)
>>> X_new.shape
(569, 16)
Attributes
----------
scores_ : array-like of shape (n_features,)
Scores of features.
pvalues_ : array-like of shape (n_features,)
p-values of feature scores.
References
----------
https://en.wikipedia.org/wiki/False_discovery_rate
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
mutual_info_classif : Mutual information for a discrete target.
chi2 : Chi-squared stats of non-negative features for classification tasks.
f_regression : F-value between label/feature for regression tasks.
    mutual_info_regression : Mutual information for a continuous target.
SelectPercentile : Select features based on percentile of the highest
scores.
SelectKBest : Select features based on the k highest scores.
SelectFpr : Select features based on a false positive rate test.
SelectFwe : Select features based on family-wise error rate.
GenericUnivariateSelect : Univariate feature selector with configurable
mode.
"""
@_deprecate_positional_args
def __init__(self, score_func=f_classif, *, alpha=5e-2):
super().__init__(score_func=score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self)
n_features = len(self.pvalues_)
sv = np.sort(self.pvalues_)
selected = sv[sv <= float(self.alpha) / n_features *
np.arange(1, n_features + 1)]
if selected.size == 0:
return np.zeros_like(self.pvalues_, dtype=bool)
return self.pvalues_ <= selected.max()
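# Worked sketch (illustrative p-values, not real data) of the Benjamini-
# Hochberg step above: with alpha=0.05 and 4 features the per-rank cut-offs
# are 0.0125, 0.025, 0.0375 and 0.05; the sorted p-values 0.001 and 0.02 pass,
# so every feature with p <= 0.02 is kept.
def _fdr_threshold_sketch():
    alpha = 0.05
    pvalues = np.array([0.02, 0.30, 0.001, 0.04])
    n_features = len(pvalues)
    sv = np.sort(pvalues)
    selected = sv[sv <= alpha / n_features * np.arange(1, n_features + 1)]
    return pvalues <= selected.max()   # array([ True, False,  True, False])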
class SelectFwe(_BaseFilter):
"""Filter: Select the p-values corresponding to Family-wise error rate
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable, default=f_classif
Function taking two arrays X and y, and returning a pair of arrays
(scores, pvalues).
Default is f_classif (see below "See Also"). The default function only
works with classification tasks.
alpha : float, default=5e-2
The highest uncorrected p-value for features to keep.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.feature_selection import SelectFwe, chi2
>>> X, y = load_breast_cancer(return_X_y=True)
>>> X.shape
(569, 30)
>>> X_new = SelectFwe(chi2, alpha=0.01).fit_transform(X, y)
>>> X_new.shape
(569, 15)
Attributes
----------
scores_ : array-like of shape (n_features,)
Scores of features.
pvalues_ : array-like of shape (n_features,)
p-values of feature scores.
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
chi2 : Chi-squared stats of non-negative features for classification tasks.
f_regression : F-value between label/feature for regression tasks.
SelectPercentile : Select features based on percentile of the highest
scores.
SelectKBest : Select features based on the k highest scores.
SelectFpr : Select features based on a false positive rate test.
SelectFdr : Select features based on an estimated false discovery rate.
GenericUnivariateSelect : Univariate feature selector with configurable
mode.
"""
@_deprecate_positional_args
def __init__(self, score_func=f_classif, *, alpha=5e-2):
super().__init__(score_func=score_func)
self.alpha = alpha
def _get_support_mask(self):
check_is_fitted(self)
return (self.pvalues_ < self.alpha / len(self.pvalues_))
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
"""Univariate feature selector with configurable strategy.
Read more in the :ref:`User Guide <univariate_feature_selection>`.
Parameters
----------
score_func : callable, default=f_classif
Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues). For modes 'percentile' or 'k_best' it can return
        a single array of scores.
mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}, default='percentile'
Feature selection mode.
param : float or int depending on the feature selection mode, default=1e-5
Parameter of the corresponding mode.
Attributes
----------
scores_ : array-like of shape (n_features,)
Scores of features.
pvalues_ : array-like of shape (n_features,)
p-values of feature scores, None if `score_func` returned scores only.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.feature_selection import GenericUnivariateSelect, chi2
>>> X, y = load_breast_cancer(return_X_y=True)
>>> X.shape
(569, 30)
>>> transformer = GenericUnivariateSelect(chi2, mode='k_best', param=20)
>>> X_new = transformer.fit_transform(X, y)
>>> X_new.shape
(569, 20)
See Also
--------
f_classif : ANOVA F-value between label/feature for classification tasks.
mutual_info_classif : Mutual information for a discrete target.
chi2 : Chi-squared stats of non-negative features for classification tasks.
f_regression : F-value between label/feature for regression tasks.
mutual_info_regression : Mutual information for a continuous target.
SelectPercentile : Select features based on percentile of the highest
scores.
SelectKBest : Select features based on the k highest scores.
SelectFpr : Select features based on a false positive rate test.
SelectFdr : Select features based on an estimated false discovery rate.
SelectFwe : Select features based on family-wise error rate.
"""
_selection_modes = {'percentile': SelectPercentile,
'k_best': SelectKBest,
'fpr': SelectFpr,
'fdr': SelectFdr,
'fwe': SelectFwe}
@_deprecate_positional_args
def __init__(self, score_func=f_classif, *, mode='percentile', param=1e-5):
super().__init__(score_func=score_func)
self.mode = mode
self.param = param
def _make_selector(self):
selector = self._selection_modes[self.mode](score_func=self.score_func)
# Now perform some acrobatics to set the right named parameter in
# the selector
possible_params = selector._get_param_names()
possible_params.remove('score_func')
selector.set_params(**{possible_params[0]: self.param})
return selector
def _check_params(self, X, y):
if self.mode not in self._selection_modes:
raise ValueError("The mode passed should be one of %s, %r,"
" (type %s) was passed."
% (self._selection_modes.keys(), self.mode,
type(self.mode)))
self._make_selector()._check_params(X, y)
def _get_support_mask(self):
check_is_fitted(self)
selector = self._make_selector()
selector.pvalues_ = self.pvalues_
selector.scores_ = self.scores_
return selector._get_support_mask()
| bsd-3-clause |
LohithBlaze/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
altairpearl/scikit-learn | examples/linear_model/plot_robust_fit.py | 147 | 3050 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a break point above which it performs worse than OLS.
- The scores of HuberRegressor may not be compared directly to both TheilSen
  and RANSAC because it does not attempt to completely filter the outliers
  but only to lessen their effect.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn.linear_model import (
LinearRegression, TheilSenRegressor, RANSACRegressor, HuberRegressor)
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)),
('HuberRegressor', HuberRegressor())]
colors = {'OLS': 'turquoise', 'Theil-Sen': 'gold', 'RANSAC': 'lightgreen', 'HuberRegressor': 'black'}
linestyle = {'OLS': '-', 'Theil-Sen': '-.', 'RANSAC': '--', 'HuberRegressor': '--'}
lw = 3
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling Errors Only', X, y),
('Corrupt X, Small Deviants', X_errors, y),
('Corrupt y, Small Deviants', X, y_errors),
('Corrupt X, Large Deviants', X_errors_large, y),
('Corrupt y, Large Deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'b+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot, color=colors[name], linestyle=linestyle[name],
linewidth=lw, label='%s: error = %.3f' % (name, mse))
legend_title = 'Error of Mean\nAbsolute Deviation\nto Non-corrupt Data'
legend = plt.legend(loc='upper right', frameon=False, title=legend_title,
prop=dict(size='x-small'))
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
manashmndl/scikit-learn | sklearn/datasets/svmlight_format.py | 114 | 15826 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
    not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working repeatedly
    on the same dataset, it is recommended to wrap this loader with
    joblib.Memory.cache to store a memmapped backup of the CSR results of
    the first call and benefit from the near-instantaneous loading of
    memmapped structures for the subsequent calls.
    In case the file contains pairwise preference constraints (known
    as "qid" in the svmlight format) these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
    when using pairwise loss functions (as is the case in some
    learning to rank problems) so that only pairs with the same
    query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
        tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
    In case the file contains pairwise preference constraints (known
    as "qid" in the svmlight format) these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
    when using pairwise loss functions (as is the case in some
    learning to rank problems) so that only pairs with the same
    query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
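# Minimal usage sketch (illustrative only; the file names are placeholders,
# not files shipped with scikit-learn): loading a train/test pair together
# guarantees both matrices get the same number of columns.
def _load_svmlight_files_sketch():
    X_train, y_train, X_test, y_test = load_svmlight_files(
        ("train.svmlight", "test.svmlight"))
    assert X_train.shape[1] == X_test.shape[1]
    return (X_train, y_train), (X_test, y_test)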
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
    not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
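# Minimal round-trip sketch (illustrative only; the temporary file and the toy
# data are assumptions).
def _svmlight_round_trip_sketch():
    import os
    import tempfile
    X = np.array([[0.0, 1.5], [2.0, 0.0]])
    y = np.array([0, 1])
    fd, path = tempfile.mkstemp(suffix=".svmlight")
    os.close(fd)
    dump_svmlight_file(X, y, path, zero_based=True)
    X_back, y_back = load_svmlight_file(path, n_features=2)
    os.remove(path)
    return X_back.toarray(), y_back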
| bsd-3-clause |
kagayakidan/scikit-learn | sklearn/tree/tree.py | 59 | 34839 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"presort-best": _splitter.PresortBestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allow bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # np.reshape is used because it preserves data contiguity, whereas
            # indexing with [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allow bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allow bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
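# Minimal usage sketch (illustrative only; the dataset and max_depth are
# assumptions) of the importances described above.
def _feature_importances_sketch():
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier
    iris = load_iris()
    clf = DecisionTreeClassifier(max_depth=3, random_state=0)
    clf.fit(iris.data, iris.target)
    # Importances are non-negative and sum to 1 (up to floating point error).
    return clf.feature_importances_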
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The class labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
    The input samples. Internally, it will be converted to
    ``dtype=np.float32`` and if a sparse matrix is provided
    to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
    Allows bypassing several input checks.
    Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
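        # tree_.predict returns the per-leaf (weighted) class counts stored
        # during fitting; the block below normalizes each row so that, for
        # every output, the class probabilities sum to 1 (guarding against
        # rows whose counts are all zero).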
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
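# --------------------------------------------------------------------------
# Illustrative sketch (not part of scikit-learn itself): it demonstrates two
# points documented above, namely how the "balanced" mode of `class_weight`
# is computed, and that `predict_proba` returns per-leaf class fractions that
# sum to one, while `feature_importances_` is likewise normalized to sum to
# one. It only assumes the bundled iris dataset is available.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    iris = load_iris()
    y = iris.target

    # "balanced" weights: n_samples / (n_classes * np.bincount(y)).
    # Iris is perfectly balanced (50 samples per class), so every weight is 1.
    n_samples, n_classes = y.size, np.unique(y).size
    balanced_w = n_samples / (n_classes * np.bincount(y).astype(float))
    print("balanced class weights: %s" % balanced_w)

    clf = DecisionTreeClassifier(max_depth=2, random_state=0)
    clf.fit(iris.data, y)

    # Rows of predict_proba sum to 1; importances sum to 1 (Gini importance).
    print("predict_proba row sums: %s"
          % clf.predict_proba(iris.data[:3]).sum(axis=1))
    print("feature importances: %s" % clf.feature_importances_)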
| bsd-3-clause |
shusenl/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non-informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant, and they
receive the highest univariate scores. The SVM assigns a large weight to
one of these features, but also selects many of the non-informative ones.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
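# Optional follow-up (illustrative, not part of the original example): inspect
# which feature indices survived the 10% univariate selection. With 4
# informative iris features plus 20 noise columns, the selected indices are
# typically among the first four.
print("Selected feature indices: %s" % selector.get_support(indices=True))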
| bsd-3-clause |
cebarbosa/fossilgroups | ppxf/ppxf.py | 1 | 72922 | ################################################################################
#
# Copyright (C) 2001-2016, Michele Cappellari
# E-mail: michele.cappellari_at_physics.ox.ac.uk
#
# Updated versions of the software are available from my web page
# http://purl.org/cappellari/software
#
# If you have found this software useful for your research,
# I would appreciate an acknowledgment to the use of the
# "Penalized Pixel-Fitting method by Cappellari & Emsellem (2004)".
#
# This software is provided as is without any warranty whatsoever.
# Permission to use, for non-commercial purposes is granted.
# Permission to modify for personal or internal use is granted,
# provided this copyright and disclaimer are included unchanged
# at the beginning of the file. All other rights are reserved.
#
################################################################################
#+
# NAME:
# ppxf()
#
# PURPOSE:
# Extract galaxy stellar kinematics (V, sigma, h3, h4, h5, h6)
# or the stellar population and gas emission by fitting a template
# to an observed spectrum in pixel space, using the Penalized
# Pixel-Fitting (pPXF) method originally described in:
# Cappellari M., & Emsellem E., 2004, PASP, 116, 138
# http://adsabs.harvard.edu/abs/2004PASP..116..138C
#
# The following key optional features are also available:
# 1) An optimal template, positive linear combination of different input
# templates, can be fitted together with the kinematics.
# 2) One can enforce smoothness on the template weights during the fit. This
# is useful to attach a physical meaning to the weights e.g. in terms of
# the star formation history of a galaxy.
# 3) One can fit multiple kinematic components for both the stars and the gas
# emission lines. Both the stellar and gas LOSVD can be penalized and can
# be described by a general Gauss-Hermite series.
# 4) Any parameter of the LOSVD (e.g. sigma) for any kinematic component can
# either be fitted or held fixed to a given value, while other parameters
# are fitted. Alternatively, parameters can be constrained to lie within
# given limits.
# 5) Additive and/or multiplicative polynomials can be included to adjust the
# continuum shape of the template to the observed spectrum.
# 6) Iterative sigma clipping can be used to clean the spectrum.
# 7) It is possible to fit a mirror-symmetric LOSVD to two spectra at the
# same time. This is useful for spectra taken at point-symmetric spatial
# positions with respect to the center of an equilibrium stellar system.
# 8) One can include sky spectra in the fit, to deal with cases where the sky
# dominates the observed spectrum and an accurate sky subtraction is
# critical.
# 9) One can derive an estimate of the reddening in the spectrum.
# 10) The covariance matrix can be input instead of the error spectrum, to
# account for correlated errors in the spectral pixels.
# 11) One can specify the weights fraction between two kinematics components,
# e.g. to model bulge and disk contributions.
# 12) One can use templates with higher resolution than the galaxy, to
# improve the accuracy of the LOSVD extraction at low dispersion.
#
# CALLING SEQUENCE:
#
# from ppxf import ppxf
#
# pp = ppxf(templates, galaxy, noise, velScale, start,
# bias=None, bounds=None, clean=False, component=0, degree=4,
# fixed=None, fraction=None, goodpixels=None, lam=None, mdegree=0,
# moments=4, oversample=None, plot=False, quiet=False,
# reddening=None, reg_dim=None, regul=0, sky=None,
# velscale_ratio=None, vsyst=0)
#
# print(pp.sol) # print best-fitting kinematics (V, sigma, h3, h4)
#
# INPUT PARAMETERS:
# TEMPLATES: vector containing the spectrum of a single template star or more
# commonly an array of dimensions TEMPLATES[nPixels, nTemplates]
# containing different templates to be optimized during the fit of the
# kinematics. nPixels has to be >= the number of galaxy pixels.
# - To apply linear regularization to the WEIGHTS via the keyword REGUL,
# TEMPLATES should be an array of two TEMPLATES[nPixels, nAge], three
# TEMPLATES[nPixels, nAge, nMetal] or four
# TEMPLATES[nPixels, nAge, nMetal, nAlpha] dimensions, depending on the
# number of population variables one wants to study.
# This can be useful to try to attach a physical meaning to the output
# WEIGHTS, in term of the galaxy star formation history and chemical
# composition distribution.
# In that case the templates may represent single stellar population SSP
# models and should be arranged in sequence of increasing age, metallicity
# or alpha along the second, third or fourth dimension of the array
# respectively.
# - TEMPLATES and GALAXY do not need to span the same wavelength range.
# However an error will be returned by PPXF, if the velocity shift in
# pixels, required to match the galaxy with the templates, becomes larger
# than nPixels. In that case one has to truncate either the galaxy or the
# templates to make the two rest-frame spectral ranges more similar.
# GALAXY: vector containing the spectrum of the galaxy to be measured. The
# star and the galaxy spectra have to be logarithmically rebinned but the
# continuum should *not* be subtracted. The rebinning may be performed
# with the LOG_REBIN routine that is distributed with PPXF.
# - For high redshift galaxies, one should bring the spectra close to the
# restframe wavelength, before doing the PPXF fit, to prevent too large
# velocity shifts of the templates. This can be done by dividing the
# observed wavelength by (1 + z), where z is a rough estimate of the
# galaxy redshift, before the logarithmic rebinning.
# - GALAXY can also be an array of dimensions GALAXY[nGalPixels, 2]
# containing two spectra to be fitted, at the same time, with a
# reflection-symmetric LOSVD. This is useful for spectra taken at
# point-symmetric spatial positions with respect to the center of an
# equilibrium stellar system.
# For a discussion of the usefulness of this two-sided fitting see e.g.
# Section 3.6 of Rix & White (1992, MNRAS, 254, 389).
# - IMPORTANT: (1) For the two-sided fitting the VSYST keyword has to be
# used. (2) Make sure the spectra are rescaled so that they are not too many
# orders of magnitude different from unity, to avoid overflow or underflow
# problems in the calculation. E.g. units of erg/(s cm^2 A) may cause problems!
# NOISE: vector containing the 1*sigma error (per pixel) in the galaxy
# spectrum, or covariance matrix describing the correlated errors in the
# galaxy spectrum. Of course this vector/matrix must have the same units
# as the galaxy spectrum.
# - If GALAXY is a Nx2 array, NOISE has to be an array with the same
# dimensions.
# - When NOISE has dimensions NxN it is assumed to contain the covariance
# matrix with elements sigma(i, j). When the errors in the spectrum are
# uncorrelated it is mathematically equivalent to input in PPXF an error
# vector NOISE=errvec or a NxN diagonal matrix NOISE=np.diag(errvec**2)
# (note squared!).
# - IMPORTANT: the penalty term of the pPXF method is based on the
# *relative* change of the fit residuals. For this reason the penalty will
# work as expected even if no reliable estimate of the NOISE is available
# (see Cappellari & Emsellem [2004] for details).
# If no reliable noise is available this keyword can just be set to:
# NOISE = np.ones_like(galaxy) # Same weight for all pixels
# VELSCALE: velocity scale of the spectra in km/s per pixel. It has to be the
# same for both the galaxy and the template spectra.
# An exception is when the VELSCALE_RATIO keyword is used, in which case
# one can input TEMPLATES with smaller VELSCALE than GALAXY.
# - VELSCALE is *defined* in pPXF by VELSCALE = c*Delta[np.log(lambda)],
# which is approximately VELSCALE ~ c*Delta(lambda)/lambda.
# START: vector of size MOMENTS with the initial estimate for the LOSVD
# parameters.
# EXAMPLE: if MOMENTS=2, then START = [velStart, sigmaStart] contains the
# initial guess for the velocity and the velocity dispersion in km/s.
# When MOMENTS > 2, the recommended initial guess for the Gauss-Hermite
# parameters is zero. Nonzero values for h3-h6 are only useful when the
# LOSVD is kept fixed.
# - Unless a good initial guess is available, it is recommended to set the
# starting sigma >= 3*velScale in km/s (i.e. 3 pixels). In fact when the
# LOSVD is severely undersampled, and far from the true solution, the
# chi^2 of the fit becomes weakly sensitive to small variations in sigma
# (see pPXF paper). In some instances the near-constancy of chi^2 may
# cause premature convergence of the optimization.
# - In the case of two-sided fitting a good starting value for the velocity
# is velStart=0.0 (in this case VSYST will generally be nonzero).
# Alternatively one should keep in mind that velStart refers to the first
# input galaxy spectrum, while the second will have velocity -velStart.
# - With multiple kinematic components START must be a tuple of starting
# values, one for each different component.
# - EXAMPLE: We want to fit two kinematic components. We fit 4 moments for
# the first component and 2 moments for the second one as follows
# component = [0, 0, ... 0, 1, 1, ... 1]
# moments = [4, 2]
# start = [[V1, sigma1, 0, 0], [V2, sigma2, 0, 0]]
# All elements of START need to have the same number of elements as the
# largest MOMENTS.
# - EXAMPLE: We want to fit 2 moments for both kinematic components
# component = [0, 0, ... 0, 1, 1, ... 1]
# moments = [2, 2]
# start = [[V1, sigma1], [V2, sigma2]]
#
# KEYWORDS:
# BIAS: This parameter biases the (h3, h4, ...) measurements towards zero
# (Gaussian LOSVD) unless their inclusion significantly decreases the
# error in the fit. Set this to BIAS=0.0 not to bias the fit: the solution
# (including [V, sigma]) will be noisier in that case. The default BIAS
# should provide acceptable results in most cases, but it would be safe to
# test it with Monte Carlo simulations. This keyword precisely corresponds
# to the parameter \lambda in the Cappellari & Emsellem (2004) paper. Note
# that the penalty depends on the *relative* change of the fit residuals,
# so it is insensitive to proper scaling of the NOISE vector. A nonzero
# BIAS can be safely used even without a reliable NOISE spectrum, or with
# equal weighting for all pixels.
# BOUNDS: Lower and upper bounds for every kinematic parameter.
# This is an array, or list of tuples, with the same dimensions as START,
# except for the last one, which is two. In practice, for every elements
# of START one needs to specify a pair of values [lower, upper].
# - EXAMPLE: We want to fit two kinematic components, with 4 moments for the
# first component and 2 for the second. In this case
# moments = [4, 2]
# start = [[V1, sigma1, 0, 0], [V2, sigma2, 0, 0]]
# then we can specify boundaries for each kinematic parameter as
# bounds = [[[V1_lo, V1_up], [sigma1_lo, sigma1_up],
# [-0.3, 0.3], [-0.3, 0.3]],
# [[V2_lo, V2_up], [sigma2_lo, sigma2_up],
# [-0.3, 0.3], [-0.3, 0.3]]]
# NOTE: All components of the START and BOUNDS arrays have the same number
# of elements as the largest MOMENTS, but bounds for non-fitted moments
# are ignored.
# COMPONENT: When fitting more than one kinematic component, this keyword
# should contain the component number of each input template. In principle
# every template can belong to a different kinematic component.
# - EXAMPLE: We want to fit the first 50 templates to component 0 and the
# last 10 templates to component 1. In this case
# component = [0]*50 + [1]*10
# which, in Python syntax, is equivalent to
# component = [0, 0, ... 0, 1, 1, ... 1]
# - This keyword is especially useful when fitting both emission (gas) and
# absorption (stars) templates simultaneously (see example for MOMENTS
# keyword).
# CLEAN: set this keyword to use the iterative sigma clipping method described
# in Section 2.1 of Cappellari et al. (2002, ApJ, 578, 787).
# This is useful to remove from the fit unmasked bad pixels, residual gas
# emissions or cosmic rays.
# - IMPORTANT: This is recommended *only* if a reliable estimate of the
# NOISE spectrum is available. See also note below for SOL.
# DEGREE: degree of the *additive* Legendre polynomial used to correct the
# template continuum shape during the fit (default: 4).
# Set DEGREE = -1 not to include any additive polynomial.
# FIXED: Boolean specifying whether a given kinematic parameter has to be held
# fixed with the value given in START.
# This is an array, or list of tuples, with the same dimensions as START.
# - EXAMPLE: We want to fit two kinematic components, with 4 moments for the
# first component and 2 for the second. In this case
# moments = [4, 2]
# start = [[V1, sigma1, 0, 0], [V2, sigma2, 0, 0]]
# then we can held fixed the sigma (only) of both components using
# fixed = [[0, 1, 0, 0], [0, 1, 0, 0]]
# - NOTE: Setting a negative MOMENTS for a kinematic component is entirely
# equivalent to setting `fixed = 1` for all parameters of the given
# kinematic component. In other words
# moments = [-4, 2]
# is equivalent to
# moments = [4, 2]
# fixed = [[1, 1, 1, 1], [0, 0, 0, 0]]
# FRACTION: This keyword allows one to fix the ratio between the first two
# kinematic components. This is a scalar defined as follows
# FRACTION = np.sum(WEIGHTS[COMPONENT == 0]) \
# / np.sum(WEIGHTS[COMPONENT < 2])
# This is useful e.g. to try to kinematically decompose bulge and disk.
# - IMPORTANT: The TEMPLATES and GALAXY spectra should be normalized with
# mean ~ 1 (as order of magnitude) for the FRACTION keyword to work as
# expected. A warning is printed if this is not the case and the resulting
# output FRACTION is inaccurate.
# - The remaining kinematic components (COMPONENT > 1) are left free, and
# this allows, for example, to still include gas emission line components.
# GOODPIXELS: integer vector containing the indices of the good pixels in the
# GALAXY spectrum (in increasing order). Only these pixels are included in
# the fit. If the CLEAN keyword is set, in output this vector will be
# updated to contain the indices of the pixels that were actually used in
# the fit.
# - IMPORTANT: in all likely situations this keyword *has* to be specified.
# LAM: When the keyword REDDENING is used, the user has to pass in this
# keyword a vector with the same dimensions of GALAXY, giving the
# restframe wavelength in Angstrom of every pixel in the input galaxy
# spectrum. If one uses my LOG_REBIN routine to rebin the spectrum before
# the PPXF fit:
# from ppxf_util import log_rebin
# specNew, logLam, velscale = log_rebin(lamRange, galaxy)
# the wavelength can be obtained as lam = np.exp(logLam).
# MASK: boolean vector of length GALAXY.size specifying with 1 the pixels that
# should be included in the fit. This keyword is just an alternative way
# of specifying the GOODPIXELS.
# MDEGREE: degree of the *multiplicative* Legendre polynomial (with mean of 1)
# used to correct the continuum shape during the fit (default: 0). The
# zero degree multiplicative polynomial is always included in the fit as
# it corresponds to the weights assigned to the templates.
# Note that the computation time is longer with multiplicative polynomials
# than with the same number of additive polynomials.
# - IMPORTANT: Multiplicative polynomials cannot be used when the REDDENING
# keyword is set.
# MOMENTS: Order of the Gauss-Hermite moments to fit. Set this keyword to 4 to
# fit [h3, h4] and to 6 to fit [h3, h4, h5, h6]. Note that in all cases
# the G-H moments are fitted (non-linearly) *together* with [V, sigma].
# - If MOMENTS=2 or MOMENTS is not set then only [V, sigma] are fitted and
# the other parameters are returned as zero.
# - If MOMENTS is negative then the kinematics of the given COMPONENT are
# kept fixed to the input values.
# NOTE: Setting a negative MOMENTS for a kinematic component is entirely
# equivalent to setting `fixed = 1` for all parameters of the given
# kinematic component.
# - EXAMPLE: We want to keep fixed component 0, which has an LOSVD described
# by [V, sigma, h3, h4] and is modelled with 100 spectral templates;
# At the same time we fit [V, sigma] for COMPONENT=1, which is described
# by 5 templates (this situation may arise when fitting stellar templates
# with pre-determined stellar kinematics, while fitting the gas emission).
# We should give in input to ppxf() the following parameters:
# component = [0]*100 + [1]*5 # --> [0, 0, ... 0, 1, 1, 1, 1, 1]
# moments = [-4, 2]
# start = [[V, sigma, h3, h4], [V, sigma, 0, 0]]
# OVERSAMPLE: Set this keyword to a factor by which the template is
# oversampled before convolving it with a well sampled LOSVD. This can be
# useful to extract proper velocities, even when sigma < 0.7*velScale and
# the dispersion information becomes totally unreliable due to
# undersampling.
# IMPORTANT: Use of this keyword is discouraged. One should sample the
# spectrum more finely, or use higher resolution templates in combination
# with the VELSCALE_RATIO keyword, if possible.
# VELSCALE_RATIO: Integer. Gives the integer ratio > 1 between the VELSCALE of
# the GALAXY and the TEMPLATES. When this keyword is used, the templates
# are convolved by the LOSVD at their native resolution, and only
# subsequently are integrated over the pixels and fitted to GALAXY.
# This is useful for accurate recovery of the LOSVD below VELSCALE when
# templates with higher resolution than the galaxy spectrum are available.
# - Note that in realistic situations the uncertainty in the knowledge and
# variations of the intrinsic line-spread function become the limiting
# factor in recovering the LOSVD well below VELSCALE.
# PLOT: set this keyword to plot the best fitting solution and the residuals
# at the end of the fit.
# QUIET: set this keyword to suppress verbose output of the best fitting
# parameters at the end of the fit.
# REDDENING: Set this keyword to an initial estimate of the reddening
# E(B-V)>=0 to fit a positive reddening together with the kinematics and
# the templates. The fit assumes the extinction curve of Calzetti et al.
# (2000, ApJ, 533, 682) but any other prescriptions could be trivially
# implemented by modifying the function REDDENING_CURVE below.
# - IMPORTANT: The MDEGREE keyword cannot be used when REDDENING is set.
# REGUL: If this keyword is nonzero, the program applies second-degree linear
# regularization to the WEIGHTS during the PPXF fit.
# Regularization is done in one, two or three dimensions depending on
# whether the array of TEMPLATES has two, three or four dimensions
# respectively.
# Large REGUL values correspond to smoother WEIGHTS output. The WEIGHTS
# tend to a linear trend for large REGUL. When this keyword is nonzero the
# solution will be a trade-off between smoothness of WEIGHTS and goodness
# of fit.
# - When fitting multiple kinematic COMPONENT the regularization is applied
# only to the first COMPONENT=0, while additional components are not
# regularized. This is useful when fitting stellar population together
# with gas emission lines. In that case the SSP spectral templates must be
# given first and the gas emission templates are given last. In this
# situation one has to use the REG_DIM keyword (below), to give PPXF the
# dimensions of the population parameters (e.g. nAge, nMetal, nAlpha).
# A usage example is given in ppxf_population_gas_example_sdss.py
# - The effect of the regularization scheme is to enforce the numerical
# second derivatives between neighbouring weights (in every dimension) to
# be equal to `w[j-1] - 2*w[j] + w[j+1] = 0` with an error Delta=1/REGUL.
# It may be helpful to define REGUL=1/Delta and view Delta as the
# regularization error.
# - IMPORTANT: Delta needs to be smaller but of the same order of magnitude
# of the typical WEIGHTS to play an effect on the regularization. One
# quick way to achieve this is:
# (i) Divide the full TEMPLATES array by a scalar in such a way that
# the typical template has a median of one:
# TEMPLATES /= np.median(TEMPLATES);
# (ii) Do the same for the input GALAXY spectrum:
# GALAXY /= np.median(GALAXY).
# In this situation a sensible guess for Delta will be a few
# percent (e.g. 0.01 --> REGUL=100).
# - Alternatively, for a more rigorous definition of the parameter REGUL:
# (a) Perform an un-regularized fit (REGUL=0) and then rescale the
# input NOISE spectrum so that
# Chi^2/DOF = Chi^2/N_ELEMENTS(goodPixels) = 1.
# This is achieved by rescaling the input NOISE spectrum as
# NOISE = NOISE*sqrt(Chi**2/DOF) = NOISE*sqrt(pp.chi2);
# (b) Increase REGUL and iteratively redo the pPXF fit until the Chi^2
# increases from the unregularized value Chi^2 = len(goodPixels)
# value by DeltaChi^2 = sqrt(2*len(goodPixels)).
# The derived regularization corresponds to the maximum one still
# consistent with the observations and the derived star formation history
# will be the smoothest (minimum curvature) that is still consistent with
# the observations.
# - For a detailed explanation see Section 19.5 of Press et al. (2007:
# Numerical Recipes 3rd ed. available here http://www.nrbook.com/). The
# adopted implementation corresponds to their equation (19.5.10).
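#       A commented sketch of recipe (a)-(b) above (illustrative only; it
#       assumes `templates`, `galaxy`, `noise`, `velScale`, `start` and
#       `goodPixels` have been prepared as in the distributed examples):
#
#           pp = ppxf(templates, galaxy, noise, velScale, start,
#                     goodpixels=goodPixels, regul=0)
#           noise2 = noise*np.sqrt(pp.chi2)         # rescale so Chi^2/DOF = 1
#           target = np.sqrt(2*goodPixels.size)     # allowed Delta Chi^2
#           for regul in [10, 50, 100, 500, 1000]:  # crude manual scan
#               pp = ppxf(templates, galaxy, noise2, velScale, start,
#                         goodpixels=goodPixels, regul=regul)
#               print(regul, (pp.chi2 - 1)*goodPixels.size, target)
#           # adopt the largest REGUL whose Delta Chi^2 stays below `target`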
# REG_DIM: When using regularization with more than one kinematic component
# (using the COMPONENT keyword), the regularization is only applied to the
# first one (COMPONENT=0). This is useful to fit the stellar population
# and gas emission together.
# In this situation one has to use the REG_DIM keyword, to give PPXF the
# dimensions of the population parameters (e.g. nAge, nMetal, nAlpha).
# One should create the initial array of population templates as,
# e.g., TEMPLATES[nPixels, nAge, nMetal, nAlpha] and define
# reg_dim = TEMPLATES.shape[1:] = [nAge, nMetal, nAlpha]
# The array of stellar templates is then reshaped into a 2-dim array as
# TEMPLATES = TEMPLATES.reshape(TEMPLATES.shape[0], -1)
# and the gas emission templates are appended as extra columns at the end.
# A usage example is given in ppxf_population_gas_example_sdss.py.
# - When using regularization with a single component (the COMPONENT keyword
# is not used or contains identical values), the number of population
# templates along different dimensions (e.g. nAge, nMetal, nAlpha) is
# inferred from the dimensions of the TEMPLATES array and this keyword is
# not necessary.
# SKY: vector containing the spectrum of the sky to be included in the fit, or
# array of dimensions SKY[nPixels, nSky] containing different sky spectra
# to add to the model of the observed GALAXY spectrum. The SKY has to be
# log-rebinned as the GALAXY spectrum and needs to have the same number of
# pixels.
# - The sky is generally subtracted from the data before the PPXF fit.
# However, for observations very heavily dominated by the sky spectrum,
# where a very accurate sky subtraction is critical, it may be useful
# *not* to subtract the sky from the spectrum, but to include it in the
# fit using this keyword.
# VSYST: galaxy systemic velocity (zero by default). The input initial guess
# and the output velocities are measured with respect to this velocity.
# The value assigned to this keyword is *crucial* for the two-sided
# fitting. In this case VSYST can be determined from a previous normal
# one-sided fit to the galaxy velocity profile. After that initial fit,
# VSYST can be defined as the measured velocity at the galaxy center.
# More accurately VSYST is the value which has to be subtracted to obtain
# a nearly anti-symmetric velocity profile at the two opposite sides of
# the galaxy nucleus.
# - IMPORTANT: this value is generally *different* from the systemic
# velocity one can get from the literature. Do not try to use that!
#
# OUTPUT PARAMETERS (stored as attributes of the PPXF class):
# BESTFIT: a named variable to receive a vector with the best fitting
# template: this is a linear combination of the templates, convolved with
# the best fitting LOSVD, multiplied by the multiplicative polynomials and
# with subsequently added polynomial continuum terms.
# - A version of this vector, *without* LOSVD convolution, is given by
# BESTFIT = (TEMPLATES @ WEIGHTS)*mpoly + apoly,
# where the expressions to evaluate mpoly and apoly are given in the
# documentation of MPOLYWEIGHTS and POLYWEIGHTS respectively.
# CHI2: The reduced chi^2 (=chi^2/DOF) of the fit.
# GOODPIXELS: integer vector containing the indices of the good pixels in the
# fit. This vector is the same as the input GOODPIXELS if the CLEAN
# keyword is *not* set, otherwise it will be updated by removing the
# detected outliers.
# ERROR: this variable contain a vector of *formal* errors (1*sigma) for the
# fitted parameters in the output vector SOL. This option can be used when
# speed is essential, to obtain an order of magnitude estimate of the
# uncertainties, but we *strongly* recommend to run Monte Carlo
# simulations to obtain more reliable errors. In fact these errors can be
# severely underestimated in the region where the penalty effect is most
# important (sigma < 2*velScale).
# - These errors are meaningless unless Chi^2/DOF~1 (see parameter SOL
# below). However if one *assumes* that the fit is good, a corrected
# estimate of the errors is:
# errorCorr = error*sqrt(chi^2/DOF) = pp.error*sqrt(pp.chi2).
# - IMPORTANT: when running Monte Carlo simulations to determine the error,
# the penalty (BIAS) should be set to zero, or better to a very small
# value. See Section 3.4 of Cappellari & Emsellem (2004) for an
# explanation.
# POLYWEIGHTS: When DEGREE >= 0 contains the weights of the additive Legendre
# polynomials of order 0, 1, ... DEGREE. The best fitting additive
# polynomial can be explicitly evaluated as
# x = np.linspace(-1, 1, len(galaxy))
# apoly = np.polynomial.legendre.legval(x, pp.polyweights)
# - When doing a two-sided fitting (see help for GALAXY parameter), the
# additive polynomials are allowed to be different for the left and right
# spectrum. In that case the output weights of the additive polynomials
# alternate between the first (left) spectrum and the second (right)
# spectrum.
# MATRIX: Design matrix[nPixels, DEGREE+nTemplates] of the linear system.
# - pp.matrix[nPixels, :DEGREE] contains the additive polynomials if
# DEGREE >= 0.
# - pp.matrix[nPixels, DEGREE:] contains the templates convolved by the
# LOSVD and multiplied by the multiplicative polynomials if MDEGREE > 0.
# - pp.matrix[nPixels, -nGas:] contains the nGas emission line templates if
# given. In the latter case the best fitting gas emission line spectrum is
# lines = pp.matrix[:, -nGas:] @ pp.weights[-nGas:]
# MPOLYWEIGHTS: When MDEGREE > 0 this contains in output the coefficients of
# the multiplicative Legendre polynomials of order 1, 2, ... MDEGREE.
# The polynomial can be explicitly evaluated as:
# from numpy.polynomials import legendre
# x = np.linspace(-1, 1, len(galaxy))
# mpoly = legendre.legval(x, np.append(1, pp.mpolyweights))
# REDDENING: Best fitting E(B-V) value if the REDDENING keyword is set.
# SOL: Vector containing in output the parameters of the kinematics.
# If MOMENTS=2 this contains [Vel, Sigma]
# If MOMENTS=4 this contains [Vel, Sigma, h3, h4]
# If MOMENTS=6 this contains [Vel, Sigma, h3, h4, h5, h6]
# - When fitting multiple kinematic COMPONENT, pp.sol contains the solution
# for all different components, one after the other, sorted by COMPONENT.
# - Vel is the velocity, Sigma is the velocity dispersion, h3-h6 are the
# Gauss-Hermite coefficients. The model parameters are fitted
# simultaneously.
# - IMPORTANT: pPXF does not directly measure velocities, instead it
# measures shifts in spectral pixels. Given that the spectral pixels are
# equally spaced in logarithmic units, this implies that pPXF measures
# shifts of np.log(lam).
# Given that one is generally interested in velocities in km/s, Vel is
# *defined* in pPXF by Vel = c*np.log(lam_obs/lam_0), which reduces to the
# well known Doppler formula Vel = c*dLam/lam_0 for small dLam. In this
# way pPXF returns meaningful velocities for nearby galaxies, or when a
# spectrum was de-redshifted before extracting the kinematics. However
# Vel is not a well-defined quantity at large redshifts. In that case Vel
# should be converted into redshift for meaningful results.
# Given the above definition, the precise relation between the output pPXF
# velocity and redshift is Vel = c*np.log(1 + z), which reduces to the
# well known approximation z ~ Vel/c in the limit of small Vel. (A commented
# sketch of this conversion is given right after this SOL description.)
# - These are the default safety limits on the fitting parameters:
# a) Vel is constrained to be +/-2000 km/s from the first input guess
# b) velScale/10 < Sigma < 1000 km/s
# c) -0.3 < [h3, h4, ...] < 0.3 (limits are extreme values for real
# galaxies)
# They can be changed using the BOUNDS keyword.
# - In the case of two-sided LOSVD fitting the output values refer to the
# first input galaxy spectrum, while the second spectrum will have by
# construction kinematics parameters [-Vel, Sigma, -h3, h4, -h5, h6].
# If VSYST is nonzero (as required for two-sided fitting), then the output
# velocity is measured with respect to VSYST.
# - IMPORTANT: if Chi^2/DOF is not ~1 it means that the errors are not
# properly estimated, or that the template is bad and it is *not* safe to
# set the /CLEAN keyword.
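#       A commented sketch of the velocity-to-redshift conversion mentioned
#       above (illustrative; `pp` is a fitted ppxf object for a single
#       kinematic component, and `z_init` is the redshift by which the
#       spectrum was de-redshifted before the fit, or 0 if it was not):
#
#           c = 299792.458                  # speed of light in km/s
#           vel = pp.sol[0]                 # pPXF velocity in km/s
#           z_fit = np.exp(vel/c) - 1       # inverts Vel = c*np.log(1 + z)
#           z_total = (1 + z_init)*(1 + z_fit) - 1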
# WEIGHTS: receives the value of the weights by which each template was
# multiplied to best fit the galaxy spectrum. The optimal template can be
# computed with an array-vector multiplication:
# TEMP = TEMPLATES @ WEIGHTS (Numpy >= 1.10 syntax on Python >= 3.5)
# - These weights do not include the weights of the additive polynomials
# which are separately stored in pp.polyweights.
# - When the SKY keyword is used WEIGHTS[:nTemplates] contains the weights
# for the templates, while WEIGHTS[nTemplates:] gives the ones for the
# sky. In that case the best fitting galaxy template and sky are given by:
# TEMP = TEMPLATES @ WEIGHTS[:nTemplates]
# BESTSKY = SKY @ WEIGHTS[nTemplates:]
# - When doing a two-sided fitting (see help for GALAXY parameter)
# *together* with the SKY keyword, the sky weights are allowed to be
# different for the left and right spectrum. In that case the output sky
# weights alternate between the first (left) spectrum and the second
# (right) spectrum.
#
#--------------------------------
# IMPORTANT: Proper usage of pPXF
#--------------------------------
#
# The PPXF routine can give sensible quick results with the default BIAS
# parameter, however, like in any penalized/filtered/regularized method, the
# optimal amount of penalization generally depends on the problem under study.
#
# The general rule here is that the penalty should leave the line-of-sight
# velocity-distribution (LOSVD) virtually unaffected, when it is well sampled
# and the signal-to-noise ratio (S/N) is sufficiently high.
#
# EXAMPLE: If you expect an LOSVD with up to a high h4 ~ 0.2 and your adopted
# penalty (BIAS) biases the solution towards a much lower h4 ~ 0.1, even when
# the measured sigma > 3*velScale and the S/N is high, then you
# are *misusing* the pPXF method!
#
# THE RECIPE: The following is a simple practical recipe for a sensible
# determination of the penalty in pPXF:
#
# 1. Choose a minimum (S/N)_min level for your kinematics extraction and
# spatially bin your data so that there are no spectra below (S/N)_min;
#
# 2. Perform a fit of your kinematics *without* penalty (PPXF keyword BIAS=0).
# The solution will be noisy and may be affected by spurious solutions,
# however this step will allow you to check the expected mean ranges in the
# Gauss-Hermite parameters [h3, h4] for the galaxy under study;
#
# 3. Perform a Monte Carlo simulation of your spectra, following e.g. the
# included ppxf_simulation_example.py routine. Adopt as S/N in the simulation
# the chosen value (S/N)_min and as input [h3, h4] the maximum representative
# values measured in the non-penalized pPXF fit of the previous step;
#
# 4. Choose as penalty (BIAS) the *largest* value such that, for
# sigma > 3*velScale, the mean difference delta between the output [h3, h4]
# and the input [h3, h4] is well within (e.g. delta~rms/3) the rms scatter of
# the simulated values (see an example in Fig.2 of Emsellem et al. 2004,
# MNRAS, 352, 721).
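#
# A compact commented sketch of steps 2-4 (illustrative only; `make_mock` is a
# placeholder for a user-written routine that builds a noisy mock spectrum with
# known input [h3, h4] at the chosen (S/N)_min, in the spirit of
# ppxf_simulation_example.py; the remaining variables are assumed to be
# prepared as in that example):
#
#     for bias in [0.0, 0.2, 0.4, 0.6]:
#         dh3, dh4 = [], []
#         for j in range(100):            # Monte Carlo realizations
#             galaxy_mock, noise_mock, h3_in, h4_in = make_mock(sn_min)
#             pp = ppxf(templates, galaxy_mock, noise_mock, velScale, start,
#                       moments=4, bias=bias, quiet=True)
#             dh3.append(pp.sol[2] - h3_in)
#             dh4.append(pp.sol[3] - h4_in)
#         print(bias, np.mean(dh3), np.std(dh3), np.mean(dh4), np.std(dh4))
#     # adopt the largest BIAS for which the mean offsets stay well within
#     # the rms scatter (e.g. |mean| ~ rms/3) at sigma > 3*velScale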
#
#--------------------------------
#
# REQUIRED ROUTINES:
# MPFIT: file cap_mpfit.py included in the distribution
#
# MODIFICATION HISTORY:
# V1.0.0 -- Created by Michele Cappellari, Leiden, 10 October 2001.
# V3.4.7 -- First released version. MC, Leiden, 8 December 2003
# V3.5.0 -- Included /OVERSAMPLE option. MC, Leiden, 11 December 2003
# V3.6.0 -- Added MDEGREE option for multiplicative polynomials.
# Linear implementation: fast, works well in most cases, but can fail
# in certain cases. MC, Leiden, 19 March 2004
# V3.7.0 -- Revised implementation of MDEGREE option. Nonlinear
# implementation: straightforward, robust, but slower.
# MC, Leiden, 23 March 2004
# V3.7.1 -- Updated documentation. MC, Leiden, 31 March 2004
# V3.7.2 -- Corrected program stop after fit when MOMENTS=2. Bug was
# introduced in V3.7.0. MC, Leiden, 28 April 2004
# V3.7.3 -- Corrected bug: keyword ERROR was returned in pixels instead of
# km/s. Decreased lower limit on fitted dispersion. Thanks to Igor
# V. Chilingarian. MC, Leiden, 7 August 2004
# V4.0.0 -- Introduced optional two-sided fitting assuming a reflection
# symmetric LOSVD for two input spectra. MC, Vicenza, 16 August 2004
# V4.1.0 -- Corrected implementation of two-sided fitting of the LOSVD.
# Thanks to Stefan van Dongen for reporting problems.
# MC, Leiden, 3 September 2004
# V4.1.1 -- Increased maximum number of iterations ITMAX in BVLS.
# Thanks to Jesus Falcon-Barroso for reporting problems.
# Introduced error message when velocity shift is too big.
# Corrected output when MOMENTS=0. MC, Leiden, 21 September 2004
# V4.1.2 -- Handle special case where a single template without additive
# polynomials is fitted to the galaxy. MC, Leiden, 11 November 2004
# V4.1.3 -- Updated documentation. MC, Vicenza, 30 December 2004
# V4.1.4 -- Make sure input NOISE is a positive vector.
# MC, Leiden, 12 January 2005
# V4.1.5 -- Verify that GOODPIXELS is monotonic and does not contain
# duplicated values. After feedback from Richard McDermid.
# MC, Leiden, 10 February 2005
# V4.1.6 -- Print number of nonzero templates. Do not print outliers in
# /QUIET mode. MC, Leiden, 20 January 2006
# V4.1.7 -- Updated documentation with important note on penalty
# determination. MC, Oxford, 6 October 2007
# V4.2.0 -- Introduced optional fitting of SKY spectrum. Many thanks to
# Anne-Marie Weijmans for testing. MC, Oxford, 15 March 2008
# V4.2.1 -- Use LA_LEAST_SQUARES (IDL 5.6) instead of SVDC when fitting
# a single template. Please let me know if you need to use PPXF
# with an older IDL version. MC, Oxford, 17 May 2008
# V4.2.2 -- Added keyword POLYWEIGHTS. MC, Windhoek, 3 July 2008
# V4.2.3 -- Corrected error message for too big velocity shift.
# MC, Oxford, 27 November 2008
# V4.3.0 -- Introduced REGUL keyword to perform linear regularization of
# WEIGHTS in one or two dimensions. MC, Oxford, 4 March 2009
# V4.4.0 -- Introduced Calzetti et al. (2000) PPXF_REDDENING_CURVE function to
# estimate the reddening from the fit. MC, Oxford, 18 September 2009
# V4.5.0 -- Dramatic speed up in the convolution of long spectra.
# MC, Oxford, 13 April 2010
# V4.6.0 -- Important fix to /CLEAN procedure: bad pixels are now properly
# updated during the 3sigma iterations. MC, Oxford, 12 April 2011
# V4.6.1 -- Use Coyote Graphics (http://www.idlcoyote.com/) by David W.
# Fanning. The required routines are now included in NASA IDL
# Astronomy Library. MC, Oxford, 29 July 2011
# V4.6.2 -- Included option for 3D regularization and updated documentation of
# REGUL keyword. MC, Oxford, 17 October 2011
# V4.6.3 -- Do not change TEMPLATES array in output when REGUL is nonzero.
# From feedback of Richard McDermid. MC, Oxford 25 October 2011
# V4.6.4 -- Increased oversampling factor to 30x, when the /OVERSAMPLE keyword
# is used. Updated corresponding documentation. Thanks to Nora
# Lu"tzgendorf for test cases illustrating errors in the recovered
# velocity when the sigma is severely undersampled.
# MC, Oxford, 9 December 2011
# V4.6.5 -- Expanded documentation of REGUL keyword.
# MC, Oxford, 15 November 2012
# V4.6.6 -- Uses CAP_RANGE to avoid potential naming conflicts.
# MC, Paranal, 8 November 2013
# V5.0.0 -- Translated from IDL into Python and tested against the original
# version. MC, Oxford, 6 December 2013
# V5.0.1 -- Minor cleaning and corrections. MC, Oxford, 12 December 2013
# V5.1.0 -- Allow for a different LOSVD for each template. Templates can be
# stellar or can be gas emission lines. A PPXF version adapted for
# multiple kinematic components existed for years. It was updated in
# JAN/2012 for the paper by Johnston et al. (2013, MNRAS). This
# version merges those changes with the public PPXF version, making
# sure that all previous PPXF options are still supported.
# MC, Oxford, 9 January 2014
# V5.1.1 -- Fixed typo in the documentation of nnls_flags.
# MC, Dallas Airport, 9 February 2014
# V5.1.2 -- Replaced REBIN with INTERPOLATE with /OVERSAMPLE keyword. This is
# to account for the fact that the Line Spread Function of the
# observed galaxy spectrum already includes pixel convolution. Thanks
# to Mike Blanton for the suggestion. MC, Oxford, 6 May 2014
# V5.1.3 -- Allow for an input covariance matrix instead of an error spectrum.
# MC, Oxford, 7 May 2014
# V5.1.4: Support both Python 2.6/2.7 and Python 3.x. MC, Oxford, 25 May 2014
# V5.1.5: Fixed deprecation warning. MC, Oxford, 21 June 2014
# V5.1.6: Catch an additional input error. Updated documentation for Python.
# Included templates `matrix` in output. Modified plotting colours.
# MC, Oxford, 6 August 2014
# V5.1.7: Relaxed requirement on input maximum velocity shift.
# Minor reorganization of the code structure.
# MC, Oxford, 3 September 2014
# V5.1.8: Fixed program stop with `reddening` keyword. Thanks to Masatao
# Onodera for reporting the problem. MC, Utah, 10 September 2014
# V5.1.9: Pre-compute FFT and oversampling of templates. This speeds up the
# calculation for very long or highly-oversampled spectra. Thanks to
# Remco van den Bosch for reporting situations where this optimization
# may be useful. MC, Las Vegas Airport, 13 September 2014
# V5.1.10: Fixed bug in saving output introduced in previous version.
# MC, Oxford, 14 October 2014
# V5.1.11 -- Reverted change introduced in V5.1.2. Thanks to Nora Lützgendorf
# for reporting problems with oversample. MC, Sydney, 5 February 2015
# V5.1.12 -- Use color= instead of c= to avoid new Matplotlib 1.4 bug.
# MC, Oxford, 25 February 2015
# V5.1.13 -- Updated documentation. MC, Oxford, 24 April 2015
# V5.1.14 -- Fixed deprecation warning in numpy 1.10.
# MC, Oxford, 19 October 2015
# V5.1.15 -- Updated documentation. Thanks to Peter Weilbacher for
# corrections. MC, Oxford, 22 October 2015
# V5.1.16 -- Fixed potentially misleading typo in documentation of MOMENTS.
# MC, Oxford, 9 November 2015
# V5.1.17: Expanded explanation of the relation between output velocity and
# redshift. MC, Oxford, 21 January 2016
# V5.1.18: Fixed deprecation warning in Numpy 1.11. Changed order from 1 to 3
# during oversampling. Warn if sigma is under-sampled.
# MC, Oxford, 20 April 2016
# V5.2.0: Included `bounds`, `fixed` and `fraction` keywords.
# MC, Baltimore, 26 April 2016
# V5.3.0: Included `velscale_ratio` keyword to pass a set of templates with
# higher resolution than the galaxy spectrum.
# Changed `oversample` keyword to require integers not booleans.
# MC, Oxford, 9 May 2016
#
################################################################################
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from numpy.polynomial import legendre
from scipy import ndimage, optimize, linalg
import cap_mpfit as mpfit
#-------------------------------------------------------------------------------
def nnls_flags(A, b, flag):
"""
Solves min||A*x - b|| with
x[j] >= 0 for flag[j] == False
x[j] free for flag[j] == True
where A[m, n], b[m], x[n], flag[n]
"""
m, n = A.shape
AA = np.hstack([A, -A[:, flag]])
x, err = optimize.nnls(AA, b)
x[:n][flag] -= x[n:]
return x[:n]
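# Note (illustrative): in pPXF `flag` is set True for the additive Legendre
# polynomials and False for the templates, so the polynomial coefficients may
# take negative values while the template weights stay constrained to be >= 0.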
#-------------------------------------------------------------------------------
def rebin(x, factor):
"""
Rebin a one-dimensional vector by averaging
in groups of "factor" adjacent values
"""
return np.mean(x.reshape(-1, factor), axis=1)
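# Example (illustrative): rebin(np.arange(6.), 3) averages consecutive groups
# of three values and returns array([ 1.,  4.]); the input length must be a
# multiple of `factor`.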
#-------------------------------------------------------------------------------
def robust_sigma(y, zero=False):
"""
Biweight estimate of the scale (standard deviation).
Implements the approach described in
"Understanding Robust and Exploratory Data Analysis"
Hoaglin, Mosteller, Tukey ed., 1983, Chapter 12B, pg. 417
"""
y = np.ravel(y)
d = y if zero else y - np.median(y)
mad = np.median(np.abs(d))
u2 = (d / (9.0*mad))**2 # c = 9
good = u2 < 1.0
u1 = 1.0 - u2[good]
num = y.size * ((d[good]*u1**2)**2).sum()
den = (u1*(1.0 - 5.0*u2[good])).sum()
sigma = np.sqrt(num/(den*(den - 1.0))) # see note in above reference
return sigma
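# Note (illustrative): for data drawn from a Gaussian with a few strong
# outliers, robust_sigma() stays close to the true standard deviation, whereas
# np.std() is inflated by the outliers; this robustness is what makes it
# suitable for iterative sigma clipping with the CLEAN keyword.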
#-------------------------------------------------------------------------------
def reddening_curve(lam, ebv):
"""
Reddening curve of Calzetti et al. (2000, ApJ, 533, 682; here C+00).
This is reliable between 0.12 and 2.2 micrometres.
- LAMBDA is the restframe wavelength in Angstrom of each pixel in the
input galaxy spectrum (1 Angstrom = 1d-4 micrometres)
- EBV is the assumed E(B-V) colour excess to redden the spectrum.
In output the vector FRAC gives the fraction by which the flux at each
wavelength has to be multiplied, to model the dust reddening effect.
"""
lam = 1e4/lam # Convert Angstrom to micrometres and take 1/lambda
rv = 4.05 # C+00 equation (5)
# C+00 equation (3) but extrapolate for lam>2.2
# C+00 equation (4) but extrapolate for lam<0.12
k1 = np.where(lam <= 1e4/6300, # restframe wavelength >= 6300 Angstrom -> C+00 eq. (3), else eq. (4)
rv + 2.659*(1.040*lam - 1.857),
rv + 2.659*(1.509*lam - 0.198*lam**2 + 0.011*lam**3 - 2.156))
fact = 10**(-0.4*ebv*(k1.clip(0))) # Calzetti+00 equation (2) with opposite sign
return fact # The model spectrum has to be multiplied by this vector
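# Usage note (illustrative): ebv=0 returns a vector of ones (no attenuation),
# while e.g.
#     frac = reddening_curve(lam, 0.1)
#     reddened_model = model*frac
# attenuates the blue end of `model` more strongly than the red end, as
# expected for dust following the Calzetti et al. (2000) curve.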
#-------------------------------------------------------------------------------
def _bvls_solve(A, b, npoly):
# No need to enforce positivity constraints if fitting one single template:
# use faster linear least-squares solution instead of NNLS.
#
m, n = A.shape
if m == 1: # A is a vector, not an array
soluz = A.dot(b)/A.dot(A)
elif n == npoly + 1: # Fitting a single template
soluz = linalg.lstsq(A, b)[0]
else: # Fitting multiple templates
flag = np.zeros(n, dtype=bool)
flag[:npoly] = True # flag = True on Legendre polynomials
soluz = nnls_flags(A, b, flag)
return soluz
#-------------------------------------------------------------------------------
def _rfft_templates(templates, vsyst, vlims, sigmax, factor, nspec, ratio):
"""
Pre-compute the FFT and possibly oversample the templates
"""
# Sample the LOSVD at least to vsyst+vel+5*sigma for all kinematic components
#
if nspec == 2:
dx = int(np.max(np.ceil(abs(vsyst) + abs(vlims) + 5*sigmax)))
else:
dx = int(np.max(np.ceil(abs(vsyst + vlims) + 5*sigmax)))
# Oversample all templates (if requested)
#
if factor > 1 and ratio is None:
templates = ndimage.interpolation.zoom(templates, [factor, 1], order=3)
nk = 2*dx*factor + 1
nf = templates.shape[0]
npad = int(2**np.ceil(np.log2(nf + nk/2))) # vector length for zero padding
# Pre-compute the FFT of all templates
# (Use Numpy's rfft as Scipy adopted an odd output format)
#
rfft_templates = np.fft.rfft(templates, n=npad, axis=0)
return rfft_templates, npad
#-------------------------------------------------------------------------------
class ppxf(object):
def __init__(self, templates, galaxy, noise, velScale, start,
bias=None, clean=False, degree=4, fraction=None, goodpixels=None,
mask=None, mdegree=0, moments=2, oversample=None, plot=False,
quiet=False, sky=None, vsyst=0, regul=0, lam=None, reddening=None,
component=0, reg_dim=None, fixed=None, bounds=None,
velscale_ratio=None):
# Do extensive checking of possible input errors
#
self.galaxy = galaxy
self.noise = noise
self.clean = clean
self.fraction = fraction
self.degree = max(degree, -1)
self.mdegree = max(mdegree, 0)
self.oversample = oversample
self.quiet = quiet
self.sky = sky
self.vsyst = vsyst
self.regul = regul
self.lam = lam
self.reddening = reddening
self.reg_dim = np.asarray(reg_dim)
self.star = templates.reshape(templates.shape[0], -1)
self.npix_temp, self.ntemp = self.star.shape
self.factor = 1 # default value
if (velscale_ratio is not None) and (oversample is not None):
raise ValueError('One cannot use OVERSAMPLE with VELSCALE_RATIO')
if velscale_ratio is not None:
if not isinstance(velscale_ratio, int):
raise ValueError('VELSCALE_RATIO must be an integer')
self.npix_temp -= self.npix_temp%velscale_ratio
self.star = self.star[:self.npix_temp, :] # Make size multiple of velscale_ratio
self.npix_temp //= velscale_ratio # This is the size after rebin()
self.factor = velscale_ratio
if oversample is not None:
if not isinstance(oversample, int):
raise ValueError('OVERSAMPLE must be an integer')
self.factor = oversample
component = np.atleast_1d(component)
if component.dtype != int:
raise ValueError('COMPONENT must be integers')
if component.size == 1 and self.ntemp > 1: # component is a scalar
self.component = np.zeros(self.ntemp, dtype=int) # all templates have the same LOSVD
else:
if component.size != self.ntemp:
raise ValueError('There must be one kinematic COMPONENT per template')
self.component = component
tmp = np.unique(component)
self.ncomp = tmp.size
if not np.array_equal(tmp, np.arange(self.ncomp)):
raise ValueError('COMPONENT must contain all integers in the range 0 to NCOMP-1')
if fraction is not None:
if fraction < 0 or fraction > 1:
raise ValueError("Must be `0 < fraction < 1`")
if self.ncomp < 2:
raise ValueError("At least 2 components are needed with FRACTION keyword")
if regul > 0 and reg_dim is None:
if self.ncomp == 1:
self.reg_dim = np.asarray(templates.shape[1:])
else:
raise ValueError('REG_DIM must be specified with more than one component')
moments = np.atleast_1d(moments)
if moments.size == 1: # moments is scalar: all LOSVDs have same number of G-H moments
self.moments = np.full(self.ncomp, np.abs(moments), dtype=int)
else:
self.moments = np.abs(moments)
if tmp.size != self.moments.size:
raise ValueError('MOMENTS must be an array of length NCOMP')
if regul is None:
self.regul = 0
s2 = galaxy.shape
s3 = noise.shape
if sky is not None:
s4 = sky.shape
if s4[0] != s2[0]:
raise ValueError('SKY must have the same size as GALAXY')
if len(s2) > 2 or len(s3) > 2:
raise ValueError('Wrong GALAXY or NOISE input dimensions')
if len(s3) > 1 and s3[0] == s3[1]: # NOISE is a 2-dim covariance matrix
if s3[0] != s2[0]:
raise ValueError('Covariance Matrix must have size npix*npix')
noise = linalg.cholesky(noise, lower=1) # Cholesky factor of symmetric, positive-definite covariance matrix
self.noise = linalg.solve_triangular(noise, np.identity(s3[0]), lower=1) # Invert Cholesky factor
else: # NOISE is an error spectrum
if not np.equal(s2, s3):
raise ValueError('GALAXY and NOISE must have the same size/type')
if not np.all((noise > 0) & np.isfinite(noise)):
raise ValueError('NOISE must be a positive vector')
if self.npix_temp < s2[0]:
raise ValueError('STAR length cannot be smaller than GALAXY')
if reddening is not None:
if lam is None or not np.equal(lam.shape, s2):
raise ValueError('LAMBDA and GALAXY must have the same size/type')
if mdegree > 0:
raise ValueError('MDEGREE cannot be used with REDDENING keyword')
if mask is not None:
if mask.dtype != bool:
raise ValueError('MASK must be a boolean vector')
if mask.shape != galaxy.shape:
raise ValueError('MASK and GALAXY must have the same size')
if goodpixels is None:
goodpixels = np.flatnonzero(mask)
else:
raise ValueError('GOODPIXELS and MASK cannot be used together')
if goodpixels is None:
self.goodpixels = np.arange(s2[0])
else:
if np.any(np.diff(goodpixels) <= 0):
raise ValueError('goodpixels is not monotonic or contains duplicated values')
if goodpixels[0] < 0 or goodpixels[-1] > s2[0]-1:
raise ValueError('goodpixels are outside the data range')
self.goodpixels = goodpixels
if bias is None:
self.bias = 0.7*np.sqrt(500./np.size(self.goodpixels)) # pPXF paper pg.144 left
else:
self.bias = bias
for j in range(self.ncomp):
if self.moments[j] not in [2, 4, 6]:
raise ValueError('MOMENTS should be 2, 4 or 6 (or negative to keep kinematics fixed)')
start = np.atleast_2d(start)
if len(start) != self.ncomp:
raise ValueError('START must have one row per kinematic component')
if bounds is not None:
if self.ncomp == 1:
bounds = np.atleast_3d([bounds])
else:
bounds = np.atleast_3d(bounds)
if np.squeeze(bounds).shape[:-1] != np.squeeze(start).shape:
raise ValueError('BOUNDS must have the same shape as START')
if fixed is not None:
fixed = np.atleast_2d(fixed)
if fixed.shape != start.shape:
raise ValueError('FIXED must have the same shape as START')
if len(s2) == 2:
self.goodpixels = np.array([self.goodpixels, s2[0] + self.goodpixels]) # two-sided fitting of LOSVD
if vsyst == 0:
if len(s2) == 2:
raise ValueError('VSYST must be defined for two-sided fitting')
else:
self.vsyst = vsyst / velScale
ngh = self.moments.sum()
npars = ngh + self.mdegree*len(s2)
if reddening is not None:
npars += 1
# Explicitly specify the step for the numerical derivatives
# in MPFIT routine and force safety limits on the fitting parameters.
#
# Set [h3, h4, ...] and mult. polynomials to zero as initial guess
# and constrain -0.3 < [h3, h4, ...] < 0.3
#
parinfo = [{'step': 1e-3, 'limits': [-0.3, 0.3], 'limited': [1, 1],
'value': 0., 'fixed': 0} for j in range(npars)]
p = 0
vlims = np.empty(2*self.ncomp)
for j in range(self.ncomp):
st1, st2 = start[j, :2]/velScale # Convert velocity scale to pixels
if bounds is None:
bn1 = st1 + np.array([-2e3, 2e3])/velScale # +/-2000 km/s from first guess
bn2 = [0.1, 1e3/velScale] # hard-coded velScale/10 < sigma < 1000 km/s
else:
bn1, bn2 = bounds[j, :2]/velScale
parinfo[0 + p]['value'] = st1.clip(*bn1)
parinfo[0 + p]['limits'] = vlims[2*j:2*j+2] = bn1
parinfo[0 + p]['step'] = 1e-2
parinfo[1 + p]['value'] = st2.clip(*bn2)
parinfo[1 + p]['limits'] = bn2
parinfo[1 + p]['step'] = 1e-2
if self.npix_temp <= 2*(abs(self.vsyst + st1) + 5*st2):
raise ValueError('Velocity shift too big: Adjust wavelength ranges of spectrum and templates')
for k in range(self.moments[j]):
if moments[j] < 0: # negative moments --> keep entire LOSVD fixed
parinfo[k + p]['fixed'] = 1
elif fixed is not None: # Keep individual LOSVD parameters fixed
parinfo[k + p]['fixed'] = fixed[j, k]
if k > 1:
parinfo[k + p]['value'] = start[j, k]
if bounds is not None:
parinfo[k + p]['limits'] = bounds[j, k]
p += self.moments[j]
if mdegree > 0:
for j in range(ngh, npars):
parinfo[j]['limits'] = [-1., 1.] # force <100% corrections
elif reddening is not None:
parinfo[ngh]['value'] = reddening
parinfo[ngh]['limits'] = [0., 10.] # force positive E(B-V) < 10 mag
# Pre-compute the FFT and possibly oversample the templates
#
self.star_rfft, self.npad = _rfft_templates(
self.star, self.vsyst, vlims, parinfo[1]['limits'][1],
self.factor, galaxy.ndim, velscale_ratio)
# Here the actual calculation starts.
# If required, once the minimum is found, clean the pixels deviating
# more than 3*sigma from the best fit and repeat the minimization
# until the set of cleaned pixels does not change any more.
#
good = self.goodpixels.copy()
for j in range(5): # Do at most five cleaning iterations
self.clean = False # No cleaning during chi2 optimization
mp = mpfit.mpfit(self._fitfunc, parinfo=parinfo, quiet=1, ftol=1e-4)
ncalls = mp.nfev
if not clean:
break
goodOld = self.goodpixels.copy()
self.goodpixels = good.copy() # Reset goodpixels
self.clean = True # Do cleaning during linear fit
self._fitfunc(mp.params)
if np.array_equal(goodOld, self.goodpixels):
break
# Evaluate scatter at the bestfit (with BIAS=0)
# and also get the output bestfit and weights.
#
self.bias = 0
status, err = self._fitfunc(mp.params)
self.chi2 = robust_sigma(err, zero=True)**2 # Robust computation of Chi**2/DOF.
p = 0
self.sol = []
self.error = []
for j in range(self.ncomp):
mp.params[p:p + 2] *= velScale # Bring velocity scale back to km/s
self.sol.append(mp.params[p:self.moments[j] + p])
mp.perror[p:p + 2] *= velScale # Bring velocity scale back to km/s
self.error.append(mp.perror[p:self.moments[j] + p])
p += self.moments[j]
if mdegree > 0:
self.mpolyweights = mp.params[p:]
if reddening is not None:
self.reddening = mp.params[-1] # Replace input with best fit
if degree >= 0:
self.polyweights = self.weights[:(self.degree + 1)*len(s2)] # output weights for the additive polynomials
self.weights = self.weights[(self.degree + 1)*len(s2):] # output weights for the templates (or sky) only
if not quiet:
print("Best Fit: V sigma h3 h4 h5 h6")
for j in range(self.ncomp):
print("comp.", j, "".join("%10.3g" % f for f in self.sol[j]))
if self.sol[j][1] < velScale/2 and velscale_ratio is None:
print("Warning: sigma is under-sampled and unreliable. "
"Resample the input spectra if possible.")
print("chi2/DOF: %.4g" % self.chi2)
print('Function evaluations:', ncalls)
nw = self.weights.size
if reddening is not None:
print('Reddening E(B-V): ', self.reddening)
print('Nonzero Templates: ', np.sum(self.weights > 0), ' / ', nw)
if self.weights.size <= 20:
print('Templates weights:')
print("".join("%10.3g" % f for f in self.weights))
if fraction is not None:
fracFit = np.sum(self.weights[component==0])/np.sum(self.weights[component<2])
if abs(fracFit - fraction) > 0.01:
print("Warning: FRACTION is inaccurate. TEMPLATES and GALAXY "
"should have mean ~ 1 when using the FRACTION keyword")
if self.ncomp == 1:
self.sol = self.sol[0]
self.error = self.error[0]
# Plot final data-model comparison if required.
#
if plot:
mn = np.min(self.bestfit[self.goodpixels])
mx = np.max(self.bestfit[self.goodpixels])
resid = mn + self.galaxy - self.bestfit
mn1 = np.min(resid[self.goodpixels])
plt.xlabel("Pixels")
plt.ylabel("Counts")
plt.xlim(np.array([-0.02, 1.02])*self.galaxy.size)
plt.ylim([mn1, mx] + np.array([-0.05, 0.05])*(mx - mn1))
plt.plot(self.galaxy, 'k')
plt.plot(self.bestfit, 'r', linewidth=2)
plt.plot(self.goodpixels, resid[self.goodpixels], 'd', color='LimeGreen', mec='LimeGreen', ms=4)
plt.plot(self.goodpixels, self.goodpixels*0+mn, '.k', ms=1)
w = np.nonzero(np.diff(self.goodpixels) > 1)[0]
if w.size > 0:
for wj in w:
x = np.arange(self.goodpixels[wj], self.goodpixels[wj+1])
plt.plot(x, resid[x],'b')
w = np.hstack([0, w, w+1, -1]) # Add first and last point
else:
w = [0, -1]
for gj in self.goodpixels[w]:
plt.plot([gj, gj], [mn, self.bestfit[gj]], 'LimeGreen')
#-------------------------------------------------------------------------------
def _fitfunc(self, pars, fjac=None):
# pars = [vel_1, sigma_1, h3_1, h4_1, ... # Velocities are in pixels.
# ... # For all kinematic components
# vel_n, sigma_n, h3_n, h4_n, ...
# m1, m2, ...] # Multiplicative polynomials
nspec = self.galaxy.ndim
npix = self.galaxy.shape[0]
ngh = pars.size - self.mdegree*nspec # Parameters of the LOSVD only
if self.reddening is not None:
ngh -= 1 # Fitting reddening
# Find indices of vel_j for all kinematic components
#
vj = np.append(0, np.cumsum(self.moments)[:-1])
# Sample the LOSVD at least to vsyst+vel+5*sigma for all kinematic components
#
if nspec == 2:
dx = int(np.ceil(np.max(abs(self.vsyst) + abs(pars[0+vj]) + 5*pars[1+vj])))
else:
dx = int(np.ceil(np.max(abs(self.vsyst + pars[0+vj]) + 5*pars[1+vj])))
nl = 2*dx*self.factor + 1
x = np.linspace(-dx, dx, nl) # Evaluate the Gaussian using steps of 1/factor pixel
losvd = np.empty((nl, self.ncomp, nspec))
for j, p in enumerate(vj): # loop over kinematic components
for k in range(nspec): # nspec=2 for two-sided fitting, otherwise nspec=1
s = 1 if k == 0 else -1 # s=+1 for left spectrum, s=-1 for right one
vel = self.vsyst + s*pars[0+p]
w = (x - vel)/pars[1+p]
w2 = w**2
gauss = np.exp(-0.5*w2)
losvd[:, j, k] = gauss/gauss.sum()
# Hermite polynomials normalized as in Appendix A of van der Marel & Franx (1993).
# Coefficients for h5, h6 are given e.g. in Appendix C of Cappellari et al. (2002)
#
if self.moments[j] > 2: # h_3 h_4
poly = 1 + s*pars[2+p]/np.sqrt(3)*(w*(2*w2-3)) \
+ pars[3+p]/np.sqrt(24)*(w2*(4*w2-12)+3)
if self.moments[j] == 6: # h_5 h_6
poly += s*pars[4+p]/np.sqrt(60)*(w*(w2*(4*w2-20)+15)) \
+ pars[5+p]/np.sqrt(720)*(w2*(w2*(8*w2-60)+90)-15)
losvd[:, j, k] *= poly
# Normalization for LOSVD
losvd[:, j, k] /= losvd[:, j, k].sum()
# Compute the FFT of all LOSVDs
#
losvd_pad = np.zeros((self.npad, self.ncomp, nspec))
losvd_pad[:nl, :, :] = losvd # Zero padding
losvd_pad = np.roll(losvd_pad, (2 - nl)//2, axis=0) # Bring kernel center to first position
losvd_rfft = np.fft.rfft(losvd_pad, axis=0)
# The zeroth order multiplicative term is already included in the
# linear fit of the templates. The polynomial below has mean of 1.
#
x = np.linspace(-1, 1, npix) # X needs to be within [-1, 1] for Legendre Polynomials
if self.mdegree > 0:
if nspec == 2: # Different multiplicative poly for left and right spectra
mpoly1 = legendre.legval(x, np.append(1.0, pars[ngh::2]))
mpoly2 = legendre.legval(x, np.append(1.0, pars[ngh+1::2]))
mpoly = np.append(mpoly1, mpoly2)
else:
mpoly = legendre.legval(x, np.append(1.0, pars[ngh:]))
else:
mpoly = 1.0
# Multiplicative polynomials do not make sense when fitting reddening.
# In that case one has to assume the spectrum is well calibrated.
#
if self.reddening is not None:
mpoly = reddening_curve(self.lam, pars[ngh])
skydim = len(np.shape(self.sky)) # This can be zero
if skydim == 0:
nsky = 0
elif skydim == 1:
nsky = 1 # Number of sky spectra
else:
nsky = np.shape(self.sky)[1]
npoly = (self.degree + 1)*nspec # Number of additive polynomials in the fit
nrows = npoly + nsky*nspec + self.ntemp
ncols = npix*nspec
if self.regul > 0:
dim = self.reg_dim.size
reg2 = self.reg_dim - 2
if dim == 1:
nreg = reg2
elif dim == 2:
nreg = 2*np.prod(reg2) + 2*np.sum(reg2) # Rectangle sides have one finite difference
elif dim == 3: # Hyper-rectangle edges have one finite difference
nreg = 3*np.prod(reg2) + 4*np.sum(reg2) \
+ 4*(np.prod(reg2[[0, 1]]) + np.prod(reg2[[0, 2]]) + np.prod(reg2[[1, 2]]))
ncols += nreg
if self.fraction is not None:
ncols += 1
c = np.zeros((npix*nspec, nrows)) # This array is used for estimating predictions
if self.degree >= 0: # Fill first columns of the Design Matrix
vand = legendre.legvander(x, self.degree)
if nspec == 2:
for j, leg in enumerate(vand.T):
c[:npix, 2*j] = leg # Additive polynomials for left spectrum
c[npix:, 2*j+1] = leg # Additive polynomials for right spectrum
else:
c[:, :npoly] = vand
tmp = np.empty((self.npix_temp, nspec))
for j, star_rfft in enumerate(self.star_rfft.T): # loop over columns
for k in range(nspec):
tt = np.fft.irfft(star_rfft*losvd_rfft[:, self.component[j], k])
if self.factor == 1: # No oversampling
tmp[:, k] = tt[:self.npix_temp]
else: # Template was oversampled before convolution
tmp[:, k] = rebin(tt[:self.npix_temp*self.factor], self.factor)
c[:, npoly+j] = mpoly*tmp[:npix, :].ravel() # reform into a vector
for j in range(nsky):
skyj = self.sky[:, j]
k = npoly + self.ntemp
if nspec == 2:
c[:npix, k+2*j] = skyj # Sky for left spectrum
c[npix:, k+2*j+1] = skyj # Sky for right spectrum
else:
c[:, k+j] = skyj
a = np.zeros((ncols, nrows)) # This array is used for the system solution
s3 = self.noise.shape
if len(s3) > 1 and s3[0] == s3[1]: # input NOISE is a npix*npix covariance matrix
a[:npix*nspec, :] = self.noise.dot(c)
b = self.noise.dot(self.galaxy)
else: # input NOISE is a 1sigma error vector
a[:npix*nspec, :] = c / self.noise[:, None] # Weight all columns with errors
b = self.galaxy / self.noise
# Add second-degree 1D, 2D or 3D linear regularization
# Press W.H., et al., 2007, Numerical Recipes, 3rd ed., equation (19.5.10)
#
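# Illustrative note (comment only, not executed): for a 1D vector of template
# weights w, every interior weight j adds one extra linear equation
#     regul*(w[j-1] - 2*w[j] + w[j+1]) = 0
# i.e. the stencil [1, -2, 1]*regul placed at columns j-1, j, j+1 of `a`,
# which penalizes the second differences (curvature) of the weights.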
if self.regul > 0:
i = npoly + np.arange(np.prod(self.reg_dim)).reshape(self.reg_dim)
p = npix*nspec
diff = np.array([1, -2, 1])*self.regul
ind = np.array([-1, 0, 1])
if dim == 1:
for j in range(1, self.reg_dim-1):
a[p, i[j+ind]] = diff
p += 1
elif dim == 2:
for k in range(self.reg_dim[1]):
for j in range(self.reg_dim[0]):
if 0 != j != self.reg_dim[0]-1:
a[p, i[j+ind, k]] = diff
p += 1
if 0 != k != self.reg_dim[1]-1:
a[p, i[j, k+ind]] = diff
p += 1
elif dim == 3:
for q in range(self.reg_dim[2]):
for k in range(self.reg_dim[1]):
for j in range(self.reg_dim[0]):
if 0 != j != self.reg_dim[0]-1:
a[p, i[j+ind, k, q]] = diff
p += 1
if 0 != k != self.reg_dim[1]-1:
a[p, i[j, k+ind, q]] = diff
p += 1
if 0 != q != self.reg_dim[2]-1:
a[p, i[j, k, q+ind]] = diff
p += 1
if self.fraction is not None:
ff = a[-1, -self.ntemp:]
ff[self.component == 0] = self.fraction - 1
ff[self.component == 1] = self.fraction
ff *= 1e9
# Select the spectral region to fit and solve the over-conditioned system
# using SVD/BVLS. Use unweighted array for estimating bestfit predictions.
# Iterate to exclude pixels deviating more than 3*sigma if /CLEAN keyword is set.
m = 1
while m != 0:
if self.regul > 0 or self.fraction is not None:
if self.regul == 0:
nreg = 1
aa = a[np.append(self.goodpixels, np.arange(npix*nspec, ncols)), :]
bb = np.append(b[self.goodpixels], np.zeros(nreg))
else:
aa = a[self.goodpixels, :]
bb = b[self.goodpixels]
self.weights = _bvls_solve(aa, bb, npoly)
self.bestfit = c.dot(self.weights)
if len(s3) > 1 and s3[0] == s3[1]: # input NOISE is a npix*npix covariance matrix
err = self.noise.dot(self.galaxy - self.bestfit)[self.goodpixels]
else: # input NOISE is a 1sigma error vector
err = ((self.galaxy - self.bestfit)/self.noise)[self.goodpixels]
if self.clean:
w = np.abs(err) < 3 # select residuals smaller than 3*sigma
m = err.size - w.sum()
if m > 0:
self.goodpixels = self.goodpixels[w]
if not self.quiet:
print('Outliers:', m)
else:
break
self.matrix = c # Return LOSVD-convolved design matrix
# Penalize the solution towards (h3, h4, ...) = 0 if the inclusion of
# these additional terms does not significantly decrease the error.
# The lines below implement eq.(8)-(9) in Cappellari & Emsellem (2004)
#
if np.any(self.moments > 2) and self.bias != 0:
D2 = 0.
for j, p in enumerate(vj): # loop over kinematic components
if self.moments[j] > 2:
D2 += np.sum(pars[2+p:self.moments[j]+p]**2) # eq.(8)
err += self.bias*robust_sigma(err, zero=True)*np.sqrt(D2) # eq.(9)
return 0, err
#-------------------------------------------------------------------------------
| gpl-3.0 |
Srisai85/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 129 | 10192 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
EikeTrumann/raspiTempLog | python/plot-all.py | 1 | 1901 | # This script plots the data from all sensors into a single file
# The import order matters: matplotlib.use('Agg') must be called before pyplot is imported
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Names of the sensors (as given in the config file)
names = []
# Text strings for the plot labels
stringressources = {}
# Open the configuration file
configfile = open("/home/pi/messungen/config/sensoren",'r')
config = configfile.readlines()
configfile.close()
# Parse the configuration file
for line in config:
line = line.split(' ')
names.append(line[0])
# Read the text strings for the plot from the strings config file
with open("/home/pi/messungen/config/strings") as f:
for line in f:
(key, val) = line.split()
stringressources[key] = val
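# Assumed format of the strings file (one single-word "key value" pair per
# line), for example:
#   xlabel Time
#   ylabel Temperature
#   title Temperatures
# These keys are looked up below when the plot is labelled.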
# Read the start time of the measurement series from the config (currently disabled)
# startfile = open("/home/pi/messungen/config/start",'r')
# start = float(startfile.readline())
# startfile.close()
plt.figure(figsize=(32,16))
plot = plt.subplot(111)
for name in names:
# Read the data from the CSV file into memory
csvfile = open("/home/pi/messungen/logs/"+name+".csv",'r')
csv = csvfile.readlines()
csvfile.close()
# Lists for the data to be stored
time = []
data = []
# Transfer the data line by line into the lists
for line in csv:
values = line.split(',')
# Skip samples recorded before the start time
#if float(values[0]) < float(start):
#    continue
time.append(float(values[1]))
data.append(float(values[2]))
# Draw the data for this sensor
plot.plot(time, data, label=name.title())
# Label the plot and write the output files
box = plot.get_position()
plot.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9])
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=6)
plt.xlabel(stringressources.get('xlabel', 'Time'))
plt.ylabel(stringressources.get('ylabel', 'Temperature'))
plt.title(stringressources.get('title', 'Sensor readings'))
plt.grid(True)
plt.savefig("/home/pi/messungen/plot/"+"all"+".png", format="png")
plt.savefig("/home/pi/messungen/plot/"+"all"+".pdf", format="pdf")
| gpl-3.0 |
pierreg/tensorflow | tensorflow/contrib/factorization/python/ops/kmeans_test.py | 23 | 14710 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
import tensorflow as tf
from tensorflow.python.platform import benchmark
FLAGS = tf.app.flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(np.random.rand(num_centers, num_dims).astype(np.float32) *
center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(np.random.randn(num_points, num_dims).astype(np.float32) *
max_offset)
return (centers[assignments] + offsets,
assignments,
np.add.reduce(offsets * offsets, 1))
class KMeansTest(tf.test.TestCase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 10000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
self.kmeans = tf.contrib.factorization.KMeansClustering(
self.num_centers,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
use_mini_batch=self.use_mini_batch,
config=self.config(14),
random_seed=12)
@staticmethod
def config(tf_random_seed):
return tf.contrib.learn.RunConfig(tf_random_seed=tf_random_seed)
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
def test_clusters(self):
kmeans = self.kmeans
kmeans.fit(x=self.points, steps=1, batch_size=8)
clusters = kmeans.clusters()
self.assertAllEqual(list(clusters.shape),
[self.num_centers, self.num_dims])
def test_fit(self):
if self.batch_size != self.num_points:
# TODO(agarwal): Doesn't work with mini-batch.
return
kmeans = self.kmeans
kmeans.fit(x=self.points,
steps=1, batch_size=self.batch_size)
score1 = kmeans.score(x=self.points)
kmeans.fit(x=self.points,
steps=15 * self.num_points // self.batch_size,
batch_size=self.batch_size)
score2 = kmeans.score(x=self.points)
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.batch_size != self.num_points:
# TODO(agarwal): Doesn't work with mini-batch.
return
kmeans = tf.contrib.factorization.KMeansClustering(
self.num_centers,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
use_mini_batch=self.use_mini_batch,
config=tf.contrib.learn.RunConfig(tf_random_seed=14),
random_seed=12)
kmeans.fit(x=self.points,
# Force it to train forever until the monitor stops it.
steps=None,
batch_size=self.batch_size,
relative_tolerance=1e-4)
score = kmeans.score(x=self.points)
self.assertNear(self.true_score, score, self.true_score * 0.005)
def test_infer(self):
kmeans = self.kmeans
kmeans.fit(x=self.points, steps=10, batch_size=128)
clusters = kmeans.clusters()
# Make a small test set
points, true_assignments, true_offsets = make_random_points(clusters, 10)
# Test predict
assignments = kmeans.predict(points, batch_size=self.batch_size)
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(points, batch_size=128)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(points, batch_size=128)
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) -
2 * np.dot(points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_fit_with_cosine_distance(self):
# Create points on y=x and y=1.5x lines to check the cosine similarity.
# Note that euclidean distance will give different results in this case.
points = np.array(
[[9, 9], [0.5, 0.5], [10, 15], [0.4, 0.6]], dtype=np.float32)
# true centers are the unit vectors on lines y=x and y=1.5x
true_centers = np.array(
[[0.70710678, 0.70710678], [0.5547002, 0.83205029]], dtype=np.float32)
kmeans = tf.contrib.factorization.KMeansClustering(
2,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
config=self.config(2),
random_seed=12)
kmeans.fit(x=points, steps=10, batch_size=4)
centers = normalize(kmeans.clusters())
self.assertAllClose(np.sort(centers, axis=0),
np.sort(true_centers, axis=0))
def test_transform_with_cosine_distance(self):
points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2],
[0.1, 2.5], [0.2, 2], [0.1, 3], [0.2, 4]], dtype=np.float32)
true_centers = [normalize(np.mean(normalize(points)[4:, :], axis=0,
keepdims=True))[0],
normalize(np.mean(normalize(points)[0:4, :], axis=0,
keepdims=True))[0]]
kmeans = tf.contrib.factorization.KMeansClustering(
2,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
config=self.config(5))
kmeans.fit(x=points, steps=50, batch_size=8)
centers = normalize(kmeans.clusters())
self.assertAllClose(np.sort(centers, axis=0),
np.sort(true_centers, axis=0),
atol=1e-2)
true_transform = 1 - cosine_similarity(points, centers)
transform = kmeans.transform(points, batch_size=8)
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict_with_cosine_distance(self):
points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2],
[0.1, 2.5], [0.2, 2], [0.1, 3], [0.2, 4]], dtype=np.float32)
true_centers = np.array(
[normalize(np.mean(normalize(points)[0:4, :],
axis=0,
keepdims=True))[0],
normalize(np.mean(normalize(points)[4:, :],
axis=0,
keepdims=True))[0]], dtype=np.float32)
true_assignments = [0] * 4 + [1] * 4
true_score = len(points) - np.tensordot(normalize(points),
true_centers[true_assignments])
kmeans = tf.contrib.factorization.KMeansClustering(
2,
initial_clusters=tf.contrib.factorization.RANDOM_INIT,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
config=self.config(3))
kmeans.fit(x=points, steps=30, batch_size=8)
centers = normalize(kmeans.clusters())
self.assertAllClose(np.sort(centers, axis=0),
np.sort(true_centers, axis=0), atol=1e-2)
assignments = kmeans.predict(points, batch_size=8)
self.assertAllClose(centers[assignments],
true_centers[true_assignments], atol=1e-2)
score = kmeans.score(points, batch_size=8)
self.assertAllClose(score, true_score, atol=1e-2)
def test_predict_with_cosine_distance_and_kmeans_plus_plus(self):
# Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array([[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3],
[-3.1, -3.2], [-2.8, -3.], [-2.9, -3.1], [-3., -3.1],
[-3., -3.1], [-3.2, -3.], [-3., -3.]], dtype=np.float32)
true_centers = np.array(
[normalize(np.mean(normalize(points)[0:2, :], axis=0,
keepdims=True))[0],
normalize(np.mean(normalize(points)[2:4, :], axis=0,
keepdims=True))[0],
normalize(np.mean(normalize(points)[4:, :], axis=0,
keepdims=True))[0]], dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(normalize(points),
true_centers[true_assignments])
kmeans = tf.contrib.factorization.KMeansClustering(
3,
initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT,
distance_metric=tf.contrib.factorization.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
config=self.config(3))
kmeans.fit(x=points, steps=30, batch_size=12)
centers = normalize(kmeans.clusters())
self.assertAllClose(sorted(centers.tolist()),
sorted(true_centers.tolist()),
atol=1e-2)
assignments = kmeans.predict(points, batch_size=12)
self.assertAllClose(centers[assignments],
true_centers[true_assignments], atol=1e-2)
score = kmeans.score(points, batch_size=12)
self.assertAllClose(score, true_score, atol=1e-2)
def test_fit_raise_if_num_clusters_larger_than_num_points_random_init(self):
points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
with self.assertRaisesOpError('less'):
kmeans = tf.contrib.factorization.KMeansClustering(
num_clusters=3, initial_clusters=tf.contrib.factorization.RANDOM_INIT)
kmeans.fit(x=points, steps=10, batch_size=8)
def test_fit_raise_if_num_clusters_larger_than_num_points_kmeans_plus_plus(
self):
points = np.array([[2.0, 3.0], [1.6, 8.2]], dtype=np.float32)
with self.assertRaisesOpError(AssertionError):
kmeans = tf.contrib.factorization.KMeansClustering(
num_clusters=3,
initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT)
kmeans.fit(x=points, steps=10, batch_size=8)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self, dimension=50, num_clusters=50, points_per_cluster=10000,
center_norm=500, cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(self.num_clusters, dimension,
center_norm=center_norm)
self.points, _, scores = make_random_points(self.centers, self.num_points,
max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(iters=num_iters, wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = tf.contrib.factorization.KMeansClustering(
self.num_clusters,
initial_clusters=tf.contrib.factorization.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
tf_kmeans.fit(x=self.points, batch_size=self.num_points, steps=50,
relative_tolerance=1e-6)
_ = tf_kmeans.clusters()
scores.append(tf_kmeans.score(self.points))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(n_clusters=self.num_clusters,
init='k-means++',
max_iter=50, n_init=1, tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
jwiggins/scikit-image | doc/examples/edges/plot_circular_elliptical_hough_transform.py | 6 | 4826 | """
========================================
Circular and Elliptical Hough Transforms
========================================
The Hough transform in its simplest form is a `method to detect
straight lines <http://en.wikipedia.org/wiki/Hough_transform>`__
but it can also be used to detect circles or ellipses.
The algorithm assumes that the edges have already been detected, and it is
robust against noise and missing points.
Circle detection
================
In the following example, the Hough transform is used to detect
coin positions and match their edges. We provide a range of
plausible radii. For each radius, two circles are extracted and
we finally keep the five most prominent candidates.
The result shows that coin positions are well-detected.
Algorithm overview
------------------
Given a black circle on a white background, we first guess its
radius (or a range of radii) and construct a new circle of that radius.
This circle is centred on each black pixel of the original picture,
and the coordinates of that circle cast votes in an accumulator array.
In this geometrical construction, the position of the original
circle's centre receives the highest score.
Note that the accumulator is built larger than the original picture
so that centres outside the frame can be detected.
Its size is extended by twice the largest radius.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, color
from skimage.transform import hough_circle
from skimage.feature import peak_local_max, canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte
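# A hedged, pure-NumPy sketch of the accumulator voting described in the
# algorithm overview above (single radius, no padding for out-of-frame
# centers). It is an illustration only; the detection below uses skimage's
# hough_circle.
def _vote_single_radius(edge_img, radius, n_angles=360):
    acc = np.zeros(edge_img.shape)             # accumulator, same size as image
    thetas = np.linspace(0, 2 * np.pi, n_angles, endpoint=False)
    for r, c in zip(*np.nonzero(edge_img)):    # every edge pixel casts votes...
        rr = np.round(r + radius * np.sin(thetas)).astype(int)
        cc = np.round(c + radius * np.cos(thetas)).astype(int)
        keep = (rr >= 0) & (rr < acc.shape[0]) & (cc >= 0) & (cc < acc.shape[1])
        np.add.at(acc, (rr[keep], cc[keep]), 1)  # ...on a circle around itself
    return acc                                 # maxima = candidate centers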
# Load picture and detect edges
image = img_as_ubyte(data.coins()[0:95, 70:370])
edges = canny(image, sigma=3, low_threshold=10, high_threshold=50)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(5, 2))
# Detect two radii
hough_radii = np.arange(15, 30, 2)
hough_res = hough_circle(edges, hough_radii)
centers = []
accums = []
radii = []
for radius, h in zip(hough_radii, hough_res):
# For each radius, extract two circles
num_peaks = 2
peaks = peak_local_max(h, num_peaks=num_peaks)
centers.extend(peaks)
accums.extend(h[peaks[:, 0], peaks[:, 1]])
radii.extend([radius] * num_peaks)
# Draw the most prominent 5 circles
image = color.gray2rgb(image)
for idx in np.argsort(accums)[::-1][:5]:
center_x, center_y = centers[idx]
radius = radii[idx]
cx, cy = circle_perimeter(center_y, center_x, radius)
image[cy, cx] = (220, 20, 20)
ax.imshow(image, cmap=plt.cm.gray)
"""
.. image:: PLOT2RST.current_figure
Ellipse detection
=================
In this second example, the aim is to detect the edge of a coffee cup.
Basically, this is a projection of a circle, i.e. an ellipse.
The problem to solve is much more difficult because five parameters have to be
determined, instead of three for circles.
Algorithm overview
------------------
The algorithm takes two different points belonging to the ellipse and assumes
that they span the major axis. A loop over all the other points determines how
well an ellipse through the chosen pair fits them. A good match corresponds to
high accumulator values.
A full description of the algorithm can be found in reference [1]_.
References
----------
.. [1] Xie, Yonghong, and Qiang Ji. "A new efficient ellipse detection
method." Pattern Recognition, 2002. Proceedings. 16th International
Conference on. Vol. 2. IEEE, 2002
"""
import matplotlib.pyplot as plt
from skimage import data, color
from skimage.feature import canny
from skimage.transform import hough_ellipse
from skimage.draw import ellipse_perimeter
# Load picture, convert to grayscale and detect edges
image_rgb = data.coffee()[0:220, 160:420]
image_gray = color.rgb2gray(image_rgb)
edges = canny(image_gray, sigma=2.0,
low_threshold=0.55, high_threshold=0.8)
# Perform a Hough Transform
# The accuracy corresponds to the bin size of a major axis.
# The value is chosen in order to get a single high accumulator.
# The threshold eliminates low accumulators
result = hough_ellipse(edges, accuracy=20, threshold=250,
min_size=100, max_size=120)
result.sort(order='accumulator')
# Estimated parameters for the ellipse
best = list(result[-1])
yc, xc, a, b = [int(round(x)) for x in best[1:5]]
orientation = best[5]
# Draw the ellipse on the original image
cy, cx = ellipse_perimeter(yc, xc, a, b, orientation)
image_rgb[cy, cx] = (0, 0, 255)
# Draw the edge (white) and the resulting ellipse (red)
edges = color.gray2rgb(edges)
edges[cy, cx] = (250, 0, 0)
fig2, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(8, 4), sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})
ax1.set_title('Original picture')
ax1.imshow(image_rgb)
ax2.set_title('Edge (white) and result (red)')
ax2.imshow(edges)
plt.show()
"""
.. image:: PLOT2RST.current_figure
"""
| bsd-3-clause |
jisazaTappsi/shatter | tests/test_float_input.py | 2 | 6650 | #!/usr/bin/env python
"""Tests for float_input.py"""
import unittest
import pandas as pd
from sklearn import datasets
from shatter.constants import *
from shatter.solver import Rules
from tests.generated_code import float_input_functions as f
from tests.testing_helpers import common_testing_code
__author__ = 'juan pablo isaza'
class FloatInputTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
common_testing_code.reset_functions_file(f.__file__, hard_reset=True)
def test_simple_integer_input(self):
"""
Simple integer input
"""
function = f.simple
code = ["def {}(a):".format(function.__name__),
" return a>=2.5"]
r = Rules()
r.add(a=0, output=0)
r.add(a=1, output=0)
r.add(a=2, output=0)
r.add(a=3, output=1)
solution = r.solve(function)
self.assertEqual(solution.implementation, code)
def test_simple2_integer_input(self):
"""
Slightly more complex case with a single integer input
"""
function = f.bit_more_complex
code = ["def {}(a):".format(function.__name__),
" return (a>=0.5 and a<=2.5)"]
r = Rules()
r.add(a=0, output=0)
r.add(a=1, output=1)
r.add(a=2, output=1)
r.add(a=3, output=0)
solution = r.solve(function)
self.assertEqual(solution.implementation, code)
def test_2_integer_inputs_easy(self):
"""
The hypothesis should simplify to a single interval of one of the 2 variables.
"""
function = f.two_inputs_bit_more_complex
code_solution_1 = ["def {}(a, b):".format(function.__name__),
" return (a>=0.5 and a<=2.5)"]
code_solution_2 = ["def {}(a, b):".format(function.__name__),
" return (b>=0.5 and b<=2.5)"]
r = Rules()
r.add(a=0, b=0, output=0)
r.add(a=1, b=1, output=1)
r.add(a=2, b=2, output=1)
r.add(a=3, b=3, output=0)
solution = r.solve(function)
try:
self.assertEqual(solution.implementation, code_solution_1)
except AssertionError:
self.assertEqual(solution.implementation, code_solution_2)
def test_many_integer_inputs_easy(self):
"""
The hypothesis should simplify to a single interval of one of the 6 variables.
"""
function = f.many_inputs_bit_more_complex
code_abstract_solution = ["def {}(a, b, c, d, e, f):".format(function.__name__),
" return ({var}>=0.5 and {var}<=2.5)"]
r = Rules()
r.add(a=0, b=0, c=0, d=0, e=0, f=0, output=0)
r.add(a=1, b=1, c=1, d=1, e=1, f=1, output=1)
r.add(a=2, b=2, c=2, d=2, e=2, f=2, output=1)
r.add(a=3, b=3, c=3, d=3, e=3, f=3, output=0)
solution = r.solve(function)
variables = ['a', 'b', 'c', 'd', 'e', 'f', ]
for var in variables:
try:
code = [code_abstract_solution[0], code_abstract_solution[1].format(var=var)]
self.assertEqual(solution.implementation, code)
return # happy ending
except AssertionError:
pass # still nothing
raise Exception('no variable produced the expected solution')
def test_2_integer_inputs(self):
"""
The hypothesis that solves this problem is a perfect square on the plane with coordinates (a, b)
"""
function = f.two_inputs_bit_more_complex
code = ["def {}(a, b):".format(function.__name__),
" return (a>=1.0 and a<=2.0) and (b>=1.0 and b<=2.0)"]
r = Rules()
r.add(a=1, b=0, output=0)
r.add(a=0, b=1, output=0)
r.add(a=1, b=1, output=1)
r.add(a=2, b=2, output=1)
r.add(a=3, b=2, output=0)
r.add(a=2, b=3, output=0)
solution = r.solve(function)
self.assertEqual(solution.implementation, code)
def test_2_integer_inputs_variant(self):
"""
Variant of the test above. It is no longer a square.
"""
function = f.two_inputs_bit_more_complex
code = ["def {}(a, b):".format(function.__name__),
" return (b>=1.0 and b<=2.0) and ((a>=1.5 and a<=2.0) or a<=0.5)"]
r = Rules()
r.add(a=1, b=0, output=0)
r.add(a=0, b=1, output=1)
r.add(a=1, b=1, output=0)
r.add(a=2, b=2, output=1)
r.add(a=3, b=2, output=0)
r.add(a=2, b=3, output=0)
solution = r.solve(function)
self.assertEqual(solution.implementation, code)
def test_2_integer_inputs_bit_more_complex(self):
"""
Here the QM simplification is tested. There are 2 right solutions.
"""
function = f.two_inputs_bit_more_complex
code_solution_1 = ["def {}(a, b):".format(function.__name__),
" return (b>=2.5 and b<=5.5) or a<=1.5"]
code_solution_2 = ["def {}(a, b):".format(function.__name__),
" return (b>=2.5 and b<=5.5) or b<=1.5"]
r = Rules()
r.add(a=4, b=6, output=0)
r.add(a=5, b=5, output=1)
r.add(a=6, b=4, output=1)
r.add(a=3, b=3, output=1)
r.add(a=2, b=2, output=0)
r.add(a=1, b=1, output=1)
solution = r.solve(function)
# Tries 2 valid solutions.
try:
self.assertEqual(solution.implementation, code_solution_1)
except AssertionError:
self.assertEqual(solution.implementation, code_solution_2)
def test_sklearn_iris_data_set(self):
"""
Should generate a hypothesis for the sklearn iris data-set with low test error.
"""
iris = datasets.load_iris()
x = iris.data
y = iris.target
data_frame = pd.DataFrame(x, columns=['x1', 'x2', 'x3', 'x4'])
# Make binary and add to df
data_frame[KEYWORDS[OUTPUT]] = [int(bool(e)) for e in y]
# TODO: solve for the other classes: How to admit less than perfect solutions? introduce max_error, or timeout?
#data_frame[KEYWORDS[OUTPUT]] = [int(abs(e-1)) for e in y]
#data_frame[KEYWORDS[OUTPUT]] = [int(bool(abs(e-2))) for e in y]
function = f.solve_iris
code_solution_1 = ["def {}(x1, x2, x3, x4):".format(function.__name__),
" return x3 >= 2.45"]
r = Rules(data_frame)
solution = r.solve(function)
self.assertEqual(solution.implementation, code_solution_1)
if __name__ == '__main__':
unittest.main()
| mit |
maheshakya/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition
(dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) to the :ref:`olivetti_faces` dataset.
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
alexmojaki/odo | odo/__init__.py | 3 | 2109 | from __future__ import absolute_import, division, print_function
try:
import h5py # h5py has precedence over pytables
except:
pass
from multipledispatch import halt_ordering, restart_ordering
halt_ordering() # Turn off multipledispatch ordering
from .utils import ignoring
from .convert import convert
from .append import append
from .resource import resource
from .directory import Directory
from .into import into
from .odo import odo
from .drop import drop
from .temp import Temp
from .backends.text import TextFile
from .chunks import chunks, Chunks
from datashape import discover, dshape
import numpy as np
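# Optional backends: each import below is wrapped in ``ignoring(ImportError)``
# so that odo still imports cleanly when the corresponding dependency is not
# installed.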
with ignoring(ImportError):
from .backends.sas import sas7bdat
with ignoring(ImportError):
from .backends.pandas import pd
with ignoring(ImportError):
from .backends.bcolz import bcolz
with ignoring(ImportError):
from .backends.h5py import h5py
with ignoring(ImportError):
from .backends.hdfstore import HDFStore
with ignoring(ImportError):
from .backends.pytables import PyTables
with ignoring(ImportError):
from .backends.dynd import nd
with ignoring(ImportError):
from .backends import sql
with ignoring(ImportError):
from .backends import mongo
with ignoring(ImportError):
from .backends.csv import CSV
with ignoring(ImportError):
from .backends.json import JSON, JSONLines
with ignoring(ImportError):
from .backends.hdfs import HDFS
with ignoring(ImportError):
from .backends.ssh import SSH
with ignoring(ImportError):
from .backends import sql_csv
with ignoring(ImportError):
from .backends.aws import S3
with ignoring(ImportError):
from .backends.bokeh import ColumnDataSource
with ignoring(ImportError):
from .backends.spark import RDD
with ignoring(ImportError):
from .backends.sparksql import SparkDataFrame
with ignoring(ImportError):
from .backends.url import URL
restart_ordering() # Restart multipledispatch ordering and do ordering
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| bsd-3-clause |
iandriver/RNA-sequence-tools | RNA_Seq_analysis/corr_search.py | 2 | 8067 | import cPickle as pickle
import numpy as np
import pandas as pd
import scipy
import os
import matplotlib.pyplot as plt
import matplotlib.pylab as pl
from operator import itemgetter
from matplotlib.ticker import LinearLocator
import itertools
#***fill in these variables***
#base path to pickle files with fpkm or count matrix
path_to_file = '/Volumes/Seq_data/cuffnorm_sca_spc_1'
#for labeling all output files
base_name = 'sca_spc_1'
#gene to search
term_to_search = raw_input('Enter gene name to search correlation:')
#if you need to run a new correlation (can take a while)
run_corr = True
#define the threshold for significant correlation (0-1, with 1 being perfect correlation)
sig_threshold = 0.5
#define correlation method; options are: 'pearson', 'kendall', 'spearman'
method_name = 'pearson'
#Minimum number of observations required per pair of columns to have a valid result. Currently only available for pearson and spearman correlation.
min_period = 3
#if you want the correlation plot to be sorted by expression of the searched gene
plot_sort = True
#if you want the plot to be plotted on log2 scale
plot_log = False
#if you want to save a new significant correlation file (pickle)
save_new_sig = True
#if a gene list file exists to update from and you want that gene list to be used for correlations
make_go_matrix = False
#name of file containing gene list (must have genes under column 'GeneID')
gene_file_source = 'go_search_genes_lung_all.txt'
#set inclusion/exclusion criteria in make_new_matrix function prior to activating
exclude = False
#can rank genes by category separation (single gene clustering); if True, define categories
#in the find_gen_rank function
rank = False
#***only edit find_gen_rank categories***
#load gene expression file
by_cell = pd.DataFrame.from_csv(os.path.join(path_to_file, base_name+'_outlier_filtered.txt'), sep='\t')
by_gene = by_cell.transpose()
#create list of genes
gene_list = by_cell.index.tolist()
#create cell list
cell_list = [x for x in list(by_cell.columns.values)]
df_by_gene1 = pd.DataFrame(by_gene, columns=gene_list, index=cell_list)
df_by_cell1 = pd.DataFrame(by_cell, columns=cell_list, index=gene_list)
print df_by_gene1
def make_new_matrix(org_matrix_by_cell, gene_list_file):
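    """Subset the expression matrix to the genes listed in gene_list_file.
    The gene list file is read as tab-delimited text and must contain (at
    least) 'GeneID' and 'GroupID' columns.  When ``exclude`` is set, cells
    are additionally filtered by the name-based criteria below.  Returns the
    filtered (by_cell, by_gene) matrices."""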
split_on='_'
gene_df = pd.read_csv(os.path.join(path_to_file, gene_list_file), delimiter= '\t')
gene_list = gene_df['GeneID'].tolist()
group_list = gene_df['GroupID'].tolist()
gmatrix_df = org_matrix_by_cell[gene_list]
cmatrix_df = gmatrix_df.transpose()
cell_list1 = []
for cell in cmatrix_df.columns.values:
if exclude:
if cell.split(split_on)[1] == 'ctrl' or cell.split(split_on)[1] == 'pnx':
if cell.split(split_on)[2][0] =='C':
print cell, 'cell'
cell_list1.append(cell)
else:
cell_list1.append(cell)
new_cmatrix_df = cmatrix_df[cell_list1]
new_gmatrix_df = new_cmatrix_df.transpose()
return new_cmatrix_df, new_gmatrix_df
if make_go_matrix:
df_by_cell, df_by_gene = make_new_matrix(df_by_gene1, gene_file_source)
else:
df_by_cell, df_by_gene = df_by_cell1, df_by_gene1
#run correlation matrix and save only those above threshold
if run_corr:
if method_name != 'kendall':
corr_by_gene = df_by_gene.corr(method=method_name, min_periods=min_period)
else:
corr_by_gene = df_by_gene.corr(method=method_name)
corr_by_cell = df_by_cell.corr()
cor = corr_by_gene
cor.loc[:,:] = np.tril(cor.values, k=-1)
cor = cor.stack()
sig_corr_pos = cor[cor >=sig_threshold]
sig_corr_neg = cor[cor <=(sig_threshold*-1)]
with open(os.path.join(path_to_file,'gene_correlations_sig_neg_'+method_name+'.p'), 'wb') as fp:
pickle.dump(sig_corr_neg, fp)
with open(os.path.join(path_to_file,'gene_correlations_sig_pos_'+method_name+'.p'), 'wb') as fp0:
pickle.dump(sig_corr_pos, fp0)
with open(os.path.join(path_to_file,'by_gene_corr.p'), 'wb') as fp1:
pickle.dump(corr_by_gene, fp1)
with open(os.path.join(path_to_file,'by_cell_corr.p'), 'wb') as fp2:
pickle.dump(corr_by_cell, fp2)
corr_by_gene_pos = open(os.path.join(path_to_file,'gene_correlations_sig_pos_'+method_name+'.p'), 'rb')
corr_by_gene_neg = open(os.path.join(path_to_file,'gene_correlations_sig_neg_'+method_name+'.p'), 'rb')
cor_pos = pickle.load(corr_by_gene_pos)
cor_neg = pickle.load(corr_by_gene_neg)
cor_pos_df = pd.DataFrame(cor_pos)
cor_neg_df = pd.DataFrame(cor_neg)
sig_corr = cor_pos_df.append(cor_neg_df)
sig_corrs = pd.DataFrame(sig_corr[0], columns=["corr"])
if save_new_sig:
sig_corrs.to_csv(os.path.join(path_to_file, base_name+'_counts_corr_sig_'+method_name+'.txt'), sep = '\t')
def find_gen_rank(g, split_on='_', pos=1, cat_name=['d4pnx', 'ctrl']):
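    """Score how well expression of gene g separates the cell categories.
    Cells are ranked by expression of g; the returned value is the fraction
    of all 'd4pnx' cells that fall in the highest-expression block of the
    ranking (one block per category), formatted to two decimals."""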
    sorted_df = by_gene.sort([g])
score_on = 'd4pnx'
g_df = sorted_df[g]
ranked_cells = sorted_df.index.values
ranked_cat = [x.split(split_on)[pos] for x in ranked_cells]
div_by = int(len(ranked_cat)/len(cat_name))
start = div_by *(len(cat_name)-1)
score1 = len([x for x in ranked_cat[start:len(ranked_cat)] if x == score_on])
tot = len([x for x in ranked_cat if x == score_on])
res_score = float(score1)/float(tot)
return "%.2f" % res_score
#corr_plot finds and plots all correlated genes, log turns on log scale, sort plots the genes in the rank order of the gene searched
def corr_plot(term_to_search, log=plot_log, sort=plot_sort):
plt.clf()
corr_tup = [(term_to_search, 1)]
neg = True
fig, ax = plt.subplots()
marker = itertools.cycle(('+', 'o', '*'))
linestyles = itertools.cycle(('--', '-.', '-', ':'))
for index, row in sig_corrs.iterrows():
if term_to_search in index:
neg = False
if index[0]==term_to_search:
corr_tup.append((index[1],row['corr']))
else:
corr_tup.append((index[0],row['corr']))
if neg:
print term_to_search+' not correlated.'
corr_tup.sort(key=itemgetter(1), reverse=True)
corr_df = pd.DataFrame(corr_tup, columns=['GeneID', 'Correlation'])
corr_df.to_csv(os.path.join(path_to_file, 'Corr_w_'+term_to_search+'_list.txt'), sep = '\t', index=False)
for c in corr_tup:
print c
to_plot = [x[0] for x in corr_tup]
sorted_df = df_by_gene.sort([term_to_search])
log2_df = np.log2(df_by_gene[to_plot])
sorted_log2_df=np.log2(sorted_df[to_plot])
ylabel='Counts'
if sort and log:
ax = sorted_log2_df.plot()
xlabels = sorted_log2_df[to_plot].index.values
elif sort:
ax =sorted_df[to_plot].plot()
xlabels = sorted_df[to_plot].index.values
elif log:
ax = log2_df.plot()
ylabel= 'log2 FPKM'
xlabels = log2_df.index.values
else:
ax = df_by_gene[to_plot].plot()
xlabels = df_by_gene[to_plot].index.values
ax.set_xlabel('Cell #')
ax.set_ylabel(ylabel)
if rank:
        ax.set_title('Correlates with '+term_to_search+'. Percent separate PNX: '+find_gen_rank(term_to_search))
else:
ax.set_title('Correlates with '+term_to_search)
ax.xaxis.set_minor_locator(LinearLocator(numticks=len(xlabels)))
ax.set_xticklabels(xlabels, minor=True, rotation='vertical', fontsize=6)
ax.set_ylim([0, df_by_gene[to_plot].values.max()])
ax.tick_params(axis='x', labelsize=1)
if len(corr_tup) > 15:
l_labels = [str(x[0])+' '+"%.2f" % x[1] for x in corr_tup]
ax.legend(l_labels, loc='upper left', bbox_to_anchor=(0.01, 1.05), ncol=6, prop={'size':6})
else:
l_labels = [str(x[0])+' '+"%.2f" % x[1] for x in corr_tup]
ax.legend(l_labels, loc='upper left', bbox_to_anchor=(0.01, 1.05), ncol=4, prop={'size':8})
fig = plt.gcf()
fig.subplots_adjust(bottom=0.08, top=0.95, right=0.98, left=0.03)
plt.savefig(os.path.join(path_to_file, base_name+'_corr_with_'+term_to_search), bbox_inches='tight')
plt.show()
corr_plot(term_to_search)
corr_by_gene_pos.close()
corr_by_gene_neg.close()
| mit |
mayblue9/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
trungnt13/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
kristoforcarlson/nest-simulator-fork | topology/doc/user_manual_scripts/connections.py | 9 | 18038 | # -*- coding: utf-8 -*-
#
# connections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# create connectivity figures for topology manual
import nest
import nest.topology as tp
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
def beautify_layer(l, fig=plt.gcf(), xlabel=None, ylabel=None,
xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
"""Assume either x and ylims/ticks given or none"""
top = nest.GetStatus(l)[0]['topology']
ctr = top['center']
ext = top['extent']
    if xticks is None:
if 'rows' in top:
dx = float(ext[0]) / top['columns']
dy = float(ext[1]) / top['rows']
xticks = ctr[0]-ext[0]/2.+dx/2. + dx*np.arange(top['columns'])
yticks = ctr[1]-ext[1]/2.+dy/2. + dy*np.arange(top['rows'])
    if xlim is None:
xlim = [ctr[0]-ext[0]/2.-dx/2., ctr[0]+ext[0]/2.+dx/2.] # extra space so extent is visible
ylim = [ctr[1]-ext[1]/2.-dy/2., ctr[1]+ext[1]/2.+dy/2.]
else:
ext = [xlim[1]-xlim[0], ylim[1]-ylim[0]]
ax = fig.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_aspect('equal', 'box')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.grid(True)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return
def conn_figure(fig, layer, connd, targets=None, showmask=True, showkern=False,
xticks=range(-5,6),yticks=range(-5,6),
xlim=[-5.5,5.5],ylim=[-5.5,5.5]):
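    """Plot a layer and the targets of one or more source nodes.
    Each entry of ``targets`` is a (source, color) pair; the connection mask
    and kernel are optionally drawn around each source."""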
    if targets is None:
targets=((tp.FindCenterElement(layer),'red'),)
tp.PlotLayer(layer, fig=fig, nodesize=60)
for src, clr in targets:
if showmask:
mask = connd['mask']
else:
mask = None
if showkern:
kern = connd['kernel']
else:
kern = None
tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
src_size=250, tgt_color=clr, tgt_size=20,
kernel_color='green')
beautify_layer(layer, fig,
xlim=xlim,ylim=ylim,xticks=xticks,yticks=yticks,
xlabel='', ylabel='')
fig.gca().grid(False)
# -----------------------------------------------
# Simple connection
#{ conn1 #}
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
'elements': 'iaf_neuron'})
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.,-1.],
'upper_right': [ 2., 1.]}}}
tp.ConnectLayers(l, l, conndict)
#{ end #}
fig = plt.figure()
fig.add_subplot(121)
conn_figure(fig, l, conndict,
targets=((tp.FindCenterElement(l),'red'),
(tp.FindNearestElement(l, [4.,5.]),'yellow')))
# the same again, with periodic boundary conditions
lpbc = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
'elements': 'iaf_neuron', 'edge_wrap': True})
tp.ConnectLayers(lpbc, lpbc, conndict)
fig.add_subplot(122)
conn_figure(fig, lpbc, conndict, showmask=False,
targets=((tp.FindCenterElement(lpbc),'red'),
(tp.FindNearestElement(lpbc, [4.,5.]),'yellow')))
plt.savefig('../user_manual_figures/conn1.png', bbox_inches='tight')
# -----------------------------------------------
# free masks
def free_mask_fig(fig, loc, cdict):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
'elements': 'iaf_neuron'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5,6,2), yticks=range(-5,6,2))
fig = plt.figure()
#{ conn2r #}
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.,-1.],
'upper_right': [ 2., 1.]}}}
#{ end #}
free_mask_fig(fig, 231, conndict)
#{ conn2ro #}
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left' : [-2.,-1.],
'upper_right': [ 2., 1.]},
'anchor': [-1.5, -1.5]}}
#{ end #}
free_mask_fig(fig, 234, conndict)
#{ conn2c #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 2.0}}}
#{ end #}
free_mask_fig(fig, 232, conndict)
#{ conn2co #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 2.0},
'anchor': [-2.0,0.0]}}
#{ end #}
free_mask_fig(fig, 235, conndict)
#{ conn2d #}
conndict = {'connection_type': 'divergent',
'mask': {'doughnut': {'inner_radius': 1.5,
'outer_radius': 3.}}}
#{ end #}
free_mask_fig(fig, 233, conndict)
#{ conn2do #}
conndict = {'connection_type': 'divergent',
'mask': {'doughnut': {'inner_radius': 1.5,
'outer_radius': 3.},
'anchor': [1.5,1.5]}}
#{ end #}
free_mask_fig(fig, 236, conndict)
plt.savefig('../user_manual_figures/conn2.png', bbox_inches='tight')
# -----------------------------------------------
# 3d masks
def conn_figure_3d(fig, layer, connd, targets=None, showmask=True, showkern=False,
xticks=range(-5,6),yticks=range(-5,6),
xlim=[-5.5,5.5],ylim=[-5.5,5.5]):
    if targets is None:
targets=((tp.FindCenterElement(layer),'red'),)
tp.PlotLayer(layer, fig=fig, nodesize=20, nodecolor=(.5,.5,1.))
for src, clr in targets:
if showmask:
mask = connd['mask']
else:
mask = None
if showkern:
kern = connd['kernel']
else:
kern = None
tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
src_size=250, tgt_color=clr, tgt_size=60,
kernel_color='green')
ax = fig.gca()
ax.set_aspect('equal', 'box')
plt.draw()
def free_mask_3d_fig(fig, loc, cdict):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'layers': 11, 'extent': [11.,11.,11.],
'elements': 'iaf_neuron'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc,projection='3d')
conn_figure_3d(fig, l, cdict, xticks=range(-5,6,2), yticks=range(-5,6,2))
fig = plt.figure()
#{ conn_3d_a #}
conndict = {'connection_type': 'divergent',
'mask': {'box': {'lower_left' : [-2.,-1.,-1.],
'upper_right': [ 2., 1., 1.]}}}
#{ end #}
free_mask_3d_fig(fig, 121, conndict)
#{ conn_3d_b #}
conndict = {'connection_type': 'divergent',
'mask': {'spherical': {'radius': 2.5}}}
#{ end #}
free_mask_3d_fig(fig, 122, conndict)
plt.savefig('../user_manual_figures/conn_3d.png', bbox_inches='tight')
# -----------------------------------------------
# grid masks
def grid_mask_fig(fig, loc, cdict):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
'elements': 'iaf_neuron'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5,6,2), yticks=range(-5,6,2),
showmask=False)
fig = plt.figure()
#{ conn3 #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5}}}
#{ end #}
grid_mask_fig(fig, 131, conndict)
#{ conn3c #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5},
'anchor': {'row': 1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 132, conndict)
#{ conn3x #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5},
'anchor': {'row': -1, 'column': 2}}}
#{ end #}
grid_mask_fig(fig, 133, conndict)
plt.savefig('../user_manual_figures/conn3.png', bbox_inches='tight')
# -----------------------------------------------
# free masks
def kernel_fig(fig, loc, cdict, showkern=True):
nest.ResetKernel()
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11.,11.],
'elements': 'iaf_neuron'})
tp.ConnectLayers(l, l, cdict)
fig.add_subplot(loc)
conn_figure(fig, l, cdict, xticks=range(-5,6,2), yticks=range(-5,6,2),
showkern=showkern)
fig = plt.figure()
#{ conn4cp #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': 0.5}
#{ end #}
kernel_fig(fig, 231, conndict)
#{ conn4g #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.}}}
#{ end #}
kernel_fig(fig, 232, conndict)
#{ conn4gx #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}, 'anchor': [1.5,1.5]},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
'anchor': [1.5,1.5]}}}
#{ end #}
kernel_fig(fig, 233, conndict)
plt.draw()
#{ conn4cut #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
'cutoff': 0.5}}}
#{ end #}
kernel_fig(fig, 234, conndict)
#{ conn42d #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian2D': {'p_center': 1.0,
'sigma_x': 1., 'sigma_y': 3.}}}
#{ end #}
kernel_fig(fig, 235, conndict, showkern=False)
plt.savefig('../user_manual_figures/conn4.png', bbox_inches='tight')
# -----------------------------------------------
import numpy as np
def wd_fig(fig, loc, ldict, cdict, what, rpos=None,
xlim=[-1,51], ylim=[0,1], xticks=range(0,51,5),
yticks=np.arange(0.,1.1,0.2), clr='blue',
label=''):
nest.ResetKernel()
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict)
ax = fig.add_subplot(loc)
    if rpos is None:
rn = nest.GetLeaves(l)[0][:1] # first node
else:
rn = tp.FindNearestElement(l, rpos)
conns = nest.GetConnections(rn)
cstat = nest.GetStatus(conns)
vals = np.array([sd[what] for sd in cstat])
tgts = [sd['target'] for sd in cstat]
locs = np.array(tp.GetPosition(tgts))
ax.plot(locs[:,0], vals, 'o', mec='none', mfc=clr, label=label)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
fig = plt.figure()
#{ conn5lin #}
ldict = {'rows': 1, 'columns': 51,
'extent': [51.,1.], 'center': [25.,0.],
'elements': 'iaf_neuron'}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
wd_fig(fig, 311, ldict, cdict, 'weight', label='Weight')
wd_fig(fig, 311, ldict, cdict, 'delay' , label='Delay', clr='red')
fig.gca().legend()
lpdict = {'rows': 1, 'columns': 51, 'extent': [51.,1.], 'center': [25.,0.],
'elements': 'iaf_neuron', 'edge_wrap': True}
#{ conn5linpbc #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
#{ end #}
wd_fig(fig, 312, lpdict, cdict, 'weight', label='Weight')
wd_fig(fig, 312, lpdict, cdict, 'delay' , label='Delay', clr='red')
fig.gca().legend()
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}}}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Linear',
rpos=[25.,0.], clr='orange')
#{ conn5exp #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'exponential': {'a': 1., 'tau': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Exponential',
rpos=[25.,0.])
#{ conn5gauss #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'gaussian': {'p_center': 1., 'sigma': 5.}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Gaussian', clr='green',rpos=[25.,0.])
#{ conn5uniform #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5,-0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'uniform': {'min': 0.2, 'max': 0.8}}}
#{ end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Uniform', clr='red',rpos=[25.,0.])
fig.gca().legend()
plt.savefig('../user_manual_figures/conn5.png', bbox_inches='tight')
# --------------------------------
def pn_fig(fig, loc, ldict, cdict,
xlim=[0.,.5], ylim=[0,3.5], xticks=range(0,51,5),
yticks=np.arange(0.,1.1,0.2), clr='blue',
label=''):
nest.ResetKernel()
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict)
ax = fig.add_subplot(loc)
rn = nest.GetLeaves(l)[0]
conns = nest.GetConnections(rn)
cstat = nest.GetStatus(conns)
srcs = [sd['source'] for sd in cstat]
tgts = [sd['target'] for sd in cstat]
dist = np.array(tp.Distance(srcs,tgts))
ax.hist(dist, bins=50, histtype='stepfilled',normed=True)
r=np.arange(0.,0.51,0.01)
plt.plot(r, 2*np.pi*r*(1-2*r)*12/np.pi,'r-',lw=3,zorder=-10)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
"""ax.set_xticks(xticks)
ax.set_yticks(yticks)"""
# ax.set_aspect(100, 'box')
ax.set_xlabel('Source-target distance d')
ax.set_ylabel('Connection probability pconn(d)')
fig = plt.figure()
#{ conn6 #}
pos = [[np.random.uniform(-1.,1.),np.random.uniform(-1.,1.)]
for j in range(1000)]
ldict = {'positions': pos, 'extent': [2.,2.],
'elements': 'iaf_neuron', 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 1.0}},
'kernel': {'linear': {'c': 1., 'a': -2., 'cutoff': 0.0}},
'number_of_connections': 50,
'allow_multapses': True, 'allow_autapses': False}
#{ end #}
pn_fig(fig, 111, ldict, cdict)
plt.savefig('../user_manual_figures/conn6.png', bbox_inches='tight')
# -----------------------------
#{ conn7 #}
nest.ResetKernel()
nest.CopyModel('iaf_neuron', 'pyr')
nest.CopyModel('iaf_neuron', 'in')
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 0.5}},
'kernel': 0.8,
'sources': {'model': 'pyr'},
'targets': {'model': 'in'}}
cdict_i2p = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-0.2,-0.2],
'upper_right':[0.2,0.2]}},
'sources': {'model': 'in'},
'targets': {'model': 'pyr'}}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}
# ----------------------------
#{ conn8 #}
nest.ResetKernel()
nest.CopyModel('iaf_neuron', 'pyr')
nest.CopyModel('iaf_neuron', 'in')
nest.CopyModel('static_synapse', 'exc', {'weight': 2.0})
nest.CopyModel('static_synapse', 'inh', {'weight': -8.0})
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 0.5}},
'kernel': 0.8,
'sources': {'model': 'pyr'},
'targets': {'model': 'in'},
'synapse_model': 'exc'}
cdict_i2p = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-0.2,-0.2],
'upper_right':[0.2,0.2]}},
'sources': {'model': 'in'},
'targets': {'model': 'pyr'},
'synapse_model': 'inh'}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
#{ end #}
# ----------------------------
#{ conn9 #}
nrns = tp.CreateLayer({'rows' : 20,
'columns' : 20,
'elements': 'iaf_neuron'})
stim = tp.CreateLayer({'rows' : 1,
'columns' : 1,
'elements': 'poisson_generator'})
cdict_stim = {'connection_type': 'divergent',
'mask' : {'circular': {'radius': 0.1},
'anchor': [0.2, 0.2]}}
tp.ConnectLayers(stim, nrns, cdict_stim)
#{ end #}
# ----------------------------
#{ conn10 #}
rec = tp.CreateLayer({'rows' : 1,
'columns' : 1,
'elements': 'spike_detector'})
cdict_rec = {'connection_type': 'convergent',
'mask' : {'circular': {'radius': 0.1},
'anchor': [-0.2, 0.2]}}
tp.ConnectLayers(nrns, rec, cdict_rec)
#{ end #}
| gpl-2.0 |
gkunter/coquery | test/test_functionlist.py | 1 | 6558 | # -*- coding: utf-8 -*-
"""
This module tests the functionlist module.
Run it like so:
coquery$ python -m test.test_functionlist
"""
from __future__ import unicode_literals
import warnings
import pandas as pd
from argparse import Namespace
import logging
from coquery.functionlist import FunctionList
from coquery.functions import Function, StringChain, StringLength
from coquery import options
from test.testcase import CoqTestCase, run_tests
class BreakFunction(StringLength):
_name = "BREAK"
def evaluate(*args, **kwargs):
raise RuntimeError
class TestFunctionList(CoqTestCase):
def setUp(self):
options.cfg = Namespace()
options.cfg.drop_on_na = False
options.cfg.benchmark = False
def test_get_list(self):
func1 = Function(columns=["col1", "col2"], value="x")
func2 = Function(columns=["col3", "col4"], value="y")
f_list = FunctionList([func1, func2])
self.assertListEqual(f_list.get_list(), [func1, func2])
def test_set_list(self):
func1 = Function(columns=["col1", "col2"], value="x")
func2 = Function(columns=["col3", "col4"], value="y")
f_list = FunctionList()
self.assertListEqual(f_list.get_list(), [])
f_list.set_list([func1, func2])
self.assertListEqual(f_list.get_list(), [func1, func2])
def test_find_function(self):
func1 = Function(columns=["col1", "col2"], value="x")
func2 = Function(columns=["col3", "col4"], value="y")
func3 = Function(columns=["col5", "col6"], value="z")
f_list = FunctionList([func1, func2, func3])
self.assertEqual(f_list.find_function(func1.get_id()), func1)
self.assertEqual(f_list.find_function(func2.get_id()), func2)
self.assertEqual(f_list.find_function(func3.get_id()), func3)
def test_find_function_invalid(self):
func1 = Function(columns=["col1", "col2"], value="x")
func2 = Function(columns=["col3", "col4"], value="y")
func3 = Function(columns=["col5", "col6"], value="z")
f_list = FunctionList([func1, func3])
self.assertEqual(f_list.find_function(func1.get_id()), func1)
self.assertEqual(f_list.find_function(func2.get_id()), None)
self.assertEqual(f_list.find_function(func3.get_id()), func3)
def test_add_function(self):
func1 = Function(columns=["col1", "col2"], value="x")
func2 = Function(columns=["col3", "col4"], value="y")
func3 = Function(columns=["col5", "col6"], value="z")
f_list = FunctionList([])
f_list.add_function(func1)
self.assertEqual(f_list.get_list(), [func1])
f_list.add_function(func2)
self.assertEqual(f_list.get_list(), [func1, func2])
f_list.add_function(func3)
self.assertEqual(f_list.get_list(), [func1, func2, func3])
def test_add_function_duplicate(self):
func1 = Function(columns=["col1", "col2"], value="x")
func2 = Function(columns=["col3", "col4"], value="y")
f_list = FunctionList([])
f_list.add_function(func1)
self.assertEqual(f_list.get_list(), [func1])
f_list.add_function(func2)
self.assertEqual(f_list.get_list(), [func1, func2])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
f_list.add_function(func1)
self.assertEqual(f_list.get_list(), [func1, func2])
def test_has_function(self):
func1 = Function(columns=["col1", "col2"], value="x")
func2 = Function(columns=["col3", "col4"], value="y")
func3 = Function(columns=["col5", "col6"], value="z")
f_list = FunctionList([func1, func3])
self.assertTrue(f_list.has_function(func1))
self.assertFalse(f_list.has_function(func2))
self.assertTrue(f_list.has_function(func3))
def test_remove_function(self):
func1 = Function(columns=["col1", "col2"], value="x")
func2 = Function(columns=["col3", "col4"], value="y")
func3 = Function(columns=["col5", "col6"], value="z")
f_list = FunctionList([func1, func2, func3])
f_list.remove_function(func2)
self.assertListEqual(f_list.get_list(), [func1, func3])
f_list.remove_function(func3)
self.assertListEqual(f_list.get_list(), [func1])
f_list.remove_function(func1)
self.assertListEqual(f_list.get_list(), [])
def test_replace_function(self):
func1 = Function(columns=["col1", "col2"], value="x")
func2 = Function(columns=[func1.get_id()], value="y")
func3 = Function(columns=["col5", "col6"], value="z")
f_list = FunctionList([func1, func2])
f_list.replace_function(func1, func3)
self.assertListEqual(f_list.get_list(), [func3, func2])
self.assertEqual(
f_list.find_function(func2.get_id()).columns,
[func3.get_id()])
def test_lapply(self):
df = pd.DataFrame(
{"coq_word_label_1": ["abc"] * 3 + ["x"] * 2,
"coq_word_label_2": ["a"] * 4 + [None]})
func1 = StringChain(columns=["coq_word_label_1",
"coq_word_label_2"])
func2 = StringLength(columns=[func1.get_id()])
f_list = FunctionList([func1, func2])
df = f_list.lapply(df)
self.assertEqual(list(df[func2.get_id()].values),
[4, 4, 4, 2, 1])
def test_lapply_exception(self):
df = pd.DataFrame(
{"coq_word_label_1": ["abc"] * 3 + ["x"] * 2,
"coq_word_label_2": ["a"] * 4 + [None]})
func1 = StringChain(columns=["coq_word_label_1",
"coq_word_label_2"])
breaking = BreakFunction(columns=[func1.get_id()])
func3 = StringLength(columns=[func1.get_id()])
f_list = FunctionList([func1, breaking, func3])
logging.disable(logging.ERROR)
df = f_list.lapply(df)
logging.disable(logging.NOTSET)
self.assertTrue(len(f_list.exceptions()) == 1)
self.assertTrue(func1.get_id() in df.columns)
self.assertTrue(func3.get_id() in df.columns)
pd.np.testing.assert_array_equal(
df[func3.get_id()].values, [4, 4, 4, 2, 1])
self.assertTrue(breaking.get_id() in df.columns)
pd.np.testing.assert_array_equal(
df[breaking.get_id()].values, [None] * len(df))
provided_tests = [TestFunctionList]
def main():
run_tests(provided_tests)
if __name__ == '__main__':
main()
| gpl-3.0 |
kdmurray91/scikit-bio | skbio/stats/ordination/_canonical_correspondence_analysis.py | 3 | 8409 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from scipy.linalg import svd, lstsq
from ._ordination_results import OrdinationResults
from ._utils import corr, svd_rank, scale
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def cca(y, x, scaling=1):
r"""Compute canonical (also known as constrained) correspondence
analysis.
Canonical (or constrained) correspondence analysis is a
multivariate ordination technique. It appeared in community
ecology [1]_ and relates community composition to the variation in
the environment (or in other factors). It works from data on
abundances or counts of samples and constraints variables,
and outputs ordination axes that maximize sample separation among species.
It is better suited to extract the niches of taxa than linear
multivariate methods because it assumes unimodal response curves
(habitat preferences are often unimodal functions of habitat
variables [2]_).
As more environmental variables are added, the result gets more
similar to unconstrained ordination, so only the variables that
are deemed explanatory should be included in the analysis.
Parameters
----------
y : DataFrame
Samples by features table (n, m)
x : DataFrame
Samples by constraints table (n, q)
scaling : int, {1, 2}, optional
Scaling type 1 maintains :math:`\chi^2` distances between rows.
        Scaling type 2 preserves :math:`\chi^2` distances between columns.
For a more detailed explanation of the interpretation, check Legendre &
Legendre 1998, section 9.4.3.
Returns
-------
OrdinationResults
Object that stores the cca results.
Raises
------
ValueError
If `x` and `y` have different number of rows
If `y` contains negative values
If `y` contains a row of only 0's.
NotImplementedError
If scaling is not 1 or 2.
See Also
--------
ca
rda
OrdinationResults
Notes
-----
The algorithm is based on [3]_, \S 11.2, and is expected to give
the same results as ``cca(y, x)`` in R's package vegan, except
that this implementation won't drop constraining variables due to
perfect collinearity: the user needs to choose which ones to
input.
Canonical *correspondence* analysis shouldn't be confused with
canonical *correlation* analysis (CCorA, but sometimes called
CCA), a different technique to search for multivariate
relationships between two datasets. Canonical correlation analysis
is a statistical tool that, given two vectors of random variables,
finds linear combinations that have maximum correlation with each
other. In some sense, it assumes linear responses of "species" to
"environmental variables" and is not well suited to analyze
ecological data.
References
----------
.. [1] Cajo J. F. Ter Braak, "Canonical Correspondence Analysis: A
New Eigenvector Technique for Multivariate Direct Gradient
Analysis", Ecology 67.5 (1986), pp. 1167-1179.
.. [2] Cajo J.F. Braak and Piet F.M. Verdonschot, "Canonical
correspondence analysis and related multivariate methods in
aquatic ecology", Aquatic Sciences 57.3 (1995), pp. 255-289.
.. [3] Legendre P. and Legendre L. 1998. Numerical
Ecology. Elsevier, Amsterdam.
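    Examples
    --------
    A minimal usage sketch; the tables below are made up purely for
    illustration (any real analysis would use its own data):
    >>> import numpy as np
    >>> import pandas as pd
    >>> from skbio.stats.ordination import cca
    >>> y = pd.DataFrame(np.random.randint(1, 10, size=(8, 5)),
    ...                  columns=['sp%d' % i for i in range(5)])
    >>> x = pd.DataFrame(np.random.rand(8, 2), columns=['depth', 'pH'])
    >>> res = cca(y, x, scaling=1)  # doctest: +SKIP
    >>> res.samples.shape  # doctest: +SKIP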
"""
Y = y.as_matrix()
X = x.as_matrix()
# Perform parameter sanity checks
if X.shape[0] != Y.shape[0]:
raise ValueError("The samples by features table 'y' and the samples by"
" constraints table 'x' must have the same number of "
" rows. 'y': {0} 'x': {1}".format(X.shape[0],
Y.shape[0]))
if Y.min() < 0:
raise ValueError(
"The samples by features table 'y' must be nonnegative")
row_max = Y.max(axis=1)
if np.any(row_max <= 0):
# Or else the lstsq call to compute Y_hat breaks
raise ValueError("The samples by features table 'y' cannot contain a "
"row with only 0's")
if scaling not in {1, 2}:
raise NotImplementedError(
"Scaling {0} not implemented.".format(scaling))
# Step 1 (similar to Pearson chi-square statistic)
grand_total = Y.sum()
Q = Y / grand_total # Relative frequencies of Y (contingency table)
# Features and sample weights (marginal totals)
column_marginals = Q.sum(axis=0)
row_marginals = Q.sum(axis=1)
    # Formula 9.32 in Legendre & Legendre (1998). Notice that it's a
    # scaled version of the contribution of each cell towards the Pearson
    # chi-square statistic.
expected = np.outer(row_marginals, column_marginals)
Q_bar = (Q - expected) / np.sqrt(expected)
# Step 2. Standardize columns of X with respect to sample weights,
# using the maximum likelihood variance estimator (Legendre &
# Legendre 1998, p. 595)
X = scale(X, weights=row_marginals, ddof=0)
# Step 3. Weighted multiple regression.
X_weighted = row_marginals[:, None]**0.5 * X
B, _, rank_lstsq, _ = lstsq(X_weighted, Q_bar)
Y_hat = X_weighted.dot(B)
Y_res = Q_bar - Y_hat
# Step 4. Eigenvalue decomposition
u, s, vt = svd(Y_hat, full_matrices=False)
rank = svd_rank(Y_hat.shape, s)
s = s[:rank]
u = u[:, :rank]
vt = vt[:rank]
U = vt.T
# Step 5. Eq. 9.38
U_hat = Q_bar.dot(U) * s**-1
# Residuals analysis
u_res, s_res, vt_res = svd(Y_res, full_matrices=False)
rank = svd_rank(Y_res.shape, s_res)
s_res = s_res[:rank]
u_res = u_res[:, :rank]
vt_res = vt_res[:rank]
U_res = vt_res.T
U_hat_res = Y_res.dot(U_res) * s_res**-1
eigenvalues = np.r_[s, s_res]**2
# Scalings (p. 596 L&L 1998):
# feature scores, scaling 1
V = (column_marginals**-0.5)[:, None] * U
# sample scores, scaling 2
V_hat = (row_marginals**-0.5)[:, None] * U_hat
# sample scores, scaling 1
F = V_hat * s
# feature scores, scaling 2
F_hat = V * s
# Sample scores which are linear combinations of constraint
# variables
Z_scaling1 = ((row_marginals**-0.5)[:, None] *
Y_hat.dot(U))
Z_scaling2 = Z_scaling1 * s**-1
# Feature residual scores, scaling 1
V_res = (column_marginals**-0.5)[:, None] * U_res
# Sample residual scores, scaling 2
V_hat_res = (row_marginals**-0.5)[:, None] * U_hat_res
# Sample residual scores, scaling 1
F_res = V_hat_res * s_res
# Feature residual scores, scaling 2
F_hat_res = V_res * s_res
eigvals = eigenvalues
if scaling == 1:
features_scores = np.hstack((V, V_res))
sample_scores = np.hstack((F, F_res))
sample_constraints = np.hstack((Z_scaling1, F_res))
elif scaling == 2:
features_scores = np.hstack((F_hat, F_hat_res))
sample_scores = np.hstack((V_hat, V_hat_res))
sample_constraints = np.hstack((Z_scaling2, V_hat_res))
biplot_scores = corr(X_weighted, u)
pc_ids = ['CCA%d' % (i+1) for i in range(len(eigenvalues))]
sample_ids = y.index
feature_ids = y.columns
eigvals = pd.Series(eigenvalues, index=pc_ids)
samples = pd.DataFrame(sample_scores,
columns=pc_ids, index=sample_ids)
features = pd.DataFrame(features_scores,
columns=pc_ids, index=feature_ids)
biplot_scores = pd.DataFrame(biplot_scores,
index=x.columns,
columns=pc_ids[:biplot_scores.shape[1]])
sample_constraints = pd.DataFrame(sample_constraints,
index=sample_ids, columns=pc_ids)
return OrdinationResults(
"CCA", "Canonical Correspondence Analysis", eigvals, samples,
features=features, biplot_scores=biplot_scores,
sample_constraints=sample_constraints,
proportion_explained=eigvals / eigvals.sum())
| bsd-3-clause |
sumspr/scikit-learn | sklearn/utils/fixes.py | 133 | 12882 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
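    """Turn a version string into a comparable tuple.
    For example, '1.9.1' becomes (1, 9, 1); non-numeric components such as
    'dev-1ea1592' are kept as strings."""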
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 don't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
| bsd-3-clause |
rseubert/scikit-learn | sklearn/linear_model/randomized_l1.py | 8 | 23178 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
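    # scores_ accumulates, per feature, how many of the randomized
    # resamplings selected it; the division below turns that count into a
    # selection frequency in [0, 1].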
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
    # Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
    Randomized Lasso works by resampling the training data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold: float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
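# --- Hedged usage sketch (added for illustration, not original code) ---------
# Minimal end-to-end run of RandomizedLasso on synthetic data; the
# make_regression settings below are arbitrary choices for the example only.
def _randomized_lasso_usage_sketch():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=50, n_features=10, n_informative=3,
                           random_state=0)
    selector = RandomizedLasso(alpha='aic', n_resampling=50, random_state=0)
    selector.fit(X, y)
    # scores_ holds per-feature selection frequencies; get_support() applies
    # selection_threshold to turn them into a boolean mask.
    return selector.scores_, selector.get_support()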
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
    Randomized Logistic Regression works by resampling the training data and
    computing a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
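# --- Hedged usage sketch (added for illustration, not original code) ---------
# Feature selection on a small synthetic classification problem; the
# make_classification settings are arbitrary and only meant to show the
# fit / get_support / transform flow.
def _randomized_logistic_usage_sketch():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=60, n_features=8, n_informative=3,
                               random_state=0)
    selector = RandomizedLogisticRegression(C=1.0, n_resampling=50,
                                            random_state=0)
    X_reduced = selector.fit(X, y).transform(X)
    return selector.scores_, X_reduced.shape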
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
                         verbose=False):
    """Stability path based on randomized Lasso estimates
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
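# --- Hedged usage sketch (added for illustration, not original code) ---------
# Computing and plotting a stability path on synthetic data; assumes matplotlib
# is available, and the dataset parameters are arbitrary.
def _stability_path_usage_sketch():
    import matplotlib.pyplot as plt
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=50, n_features=10, n_informative=3,
                           random_state=0)
    alpha_grid, scores_path = lasso_stability_path(X, y, random_state=0,
                                                   n_resampling=50)
    plt.plot(alpha_grid, scores_path.T)
    plt.xlabel('alpha / alpha_max')
    plt.ylabel('selection frequency')
    return alpha_grid, scores_path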
| bsd-3-clause |
argenta-web/argenta-web.github.io | MEFaplicado-html/porticos/codigos/resultadoPortico3nos5elems.py | 2 | 20283 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 10 14:46:37 2019
Equivalent nodal load vector for the distributed load of the slab: OK!
Wind load: OK!!!
Results: OK!
@author: markinho
"""
import sympy as sp
import numpy as np
from matplotlib import rcParams
rcParams['mathtext.fontset'] = 'stix'
rcParams['font.family'] = 'STIXGeneral'
import matplotlib.pyplot as plt
def rigidez_portico(E, A, I, scL):
    '''
    Stiffness matrix of the frame element with the rotation already incorporated.
    Three-node frame element.
    E: modulus of elasticity in kN/cm2
    I: moment of inertia of the cross section about the out-of-plane axis, in cm4
    A: cross-sectional area in cm2
    scL: numpy array with the sines, cosines and lengths of the bars in cm
    np.zeros(E.shape[0]): used to extend the zeros
    '''
s = scL[0]
c = scL[1]
L = scL[2]
return np.array([ [7*A*E*c**2/(3*L) + 5092*E*I*s**2/(35*L**3), 7*A*E*c*s/(3*L) - 5092*E*I*c*s/(35*L**3), -1138*E*I*s/(35*L**2), -8*A*E*c**2/(3*L) - 512*E*I*s**2/(5*L**3), -8*A*E*c*s/(3*L) + 512*E*I*c*s/(5*L**3), -384*E*I*s/(7*L**2), A*E*c**2/(3*L) - 1508*E*I*s**2/(35*L**3), A*E*c*s/(3*L) + 1508*E*I*c*s/(35*L**3), -242*E*I*s/(35*L**2)],
[ 7*A*E*c*s/(3*L) - 5092*E*I*c*s/(35*L**3), 7*A*E*s**2/(3*L) + 5092*E*I*c**2/(35*L**3), 1138*E*I*c/(35*L**2), -8*A*E*c*s/(3*L) + 512*E*I*c*s/(5*L**3), -8*A*E*s**2/(3*L) - 512*E*I*c**2/(5*L**3), 384*E*I*c/(7*L**2), A*E*c*s/(3*L) + 1508*E*I*c*s/(35*L**3), A*E*s**2/(3*L) - 1508*E*I*c**2/(35*L**3), 242*E*I*c/(35*L**2)],
[ -1138*E*I*s/(35*L**2), 1138*E*I*c/(35*L**2), 332*E*I/(35*L), 128*E*I*s/(5*L**2), -128*E*I*c/(5*L**2), 64*E*I/(7*L), 242*E*I*s/(35*L**2), -242*E*I*c/(35*L**2), 38*E*I/(35*L)],
[ -8*A*E*c**2/(3*L) - 512*E*I*s**2/(5*L**3), -8*A*E*c*s/(3*L) + 512*E*I*c*s/(5*L**3), 128*E*I*s/(5*L**2), 16*A*E*c**2/(3*L) + 1024*E*I*s**2/(5*L**3), 16*A*E*c*s/(3*L) - 1024*E*I*c*s/(5*L**3), 0, -8*A*E*c**2/(3*L) - 512*E*I*s**2/(5*L**3), -8*A*E*c*s/(3*L) + 512*E*I*c*s/(5*L**3), -128*E*I*s/(5*L**2)],
[ -8*A*E*c*s/(3*L) + 512*E*I*c*s/(5*L**3), -8*A*E*s**2/(3*L) - 512*E*I*c**2/(5*L**3), -128*E*I*c/(5*L**2), 16*A*E*c*s/(3*L) - 1024*E*I*c*s/(5*L**3), 16*A*E*s**2/(3*L) + 1024*E*I*c**2/(5*L**3), 0, -8*A*E*c*s/(3*L) + 512*E*I*c*s/(5*L**3), -8*A*E*s**2/(3*L) - 512*E*I*c**2/(5*L**3), 128*E*I*c/(5*L**2)],
[ -384*E*I*s/(7*L**2), 384*E*I*c/(7*L**2), 64*E*I/(7*L), 0, 0, 256*E*I/(7*L), 384*E*I*s/(7*L**2), -384*E*I*c/(7*L**2), 64*E*I/(7*L)],
[ A*E*c**2/(3*L) - 1508*E*I*s**2/(35*L**3), A*E*c*s/(3*L) + 1508*E*I*c*s/(35*L**3), 242*E*I*s/(35*L**2), -8*A*E*c**2/(3*L) - 512*E*I*s**2/(5*L**3), -8*A*E*c*s/(3*L) + 512*E*I*c*s/(5*L**3), 384*E*I*s/(7*L**2), 7*A*E*c**2/(3*L) + 5092*E*I*s**2/(35*L**3), 7*A*E*c*s/(3*L) - 5092*E*I*c*s/(35*L**3), 1138*E*I*s/(35*L**2)],
[ A*E*c*s/(3*L) + 1508*E*I*c*s/(35*L**3), A*E*s**2/(3*L) - 1508*E*I*c**2/(35*L**3), -242*E*I*c/(35*L**2), -8*A*E*c*s/(3*L) + 512*E*I*c*s/(5*L**3), -8*A*E*s**2/(3*L) - 512*E*I*c**2/(5*L**3), -384*E*I*c/(7*L**2), 7*A*E*c*s/(3*L) - 5092*E*I*c*s/(35*L**3), 7*A*E*s**2/(3*L) + 5092*E*I*c**2/(35*L**3), -1138*E*I*c/(35*L**2)],
[ -242*E*I*s/(35*L**2), 242*E*I*c/(35*L**2), 38*E*I/(35*L), -128*E*I*s/(5*L**2), 128*E*I*c/(5*L**2), 64*E*I/(7*L), 1138*E*I*s/(35*L**2), -1138*E*I*c/(35*L**2), 332*E*I/(35*L)]])
def angulos_comprimentos(nos, elementos):
    '''
    Computes the sine and cosine of each bar and its length.
    no1: coordinates of node 1 as array([x, y])
    no2: coordinates of node 2 as array([x, y])
    returns an array with the elements along the first dimension and [sin, cos, length] along the second
    '''
sen_cos_comp_comp = np.zeros( (elementos.shape[0], 3) )
    no1 = nos[elementos[:,0]] #initial nodes
    no2 = nos[elementos[:,2]] #final nodes
    sen_cos_comp_comp[:,2] = np.sqrt( (no2[:,0] - no1[:,0])**2 + (no2[:,1] - no1[:,1])**2) #length
    sen_cos_comp_comp[:,0] = (no2[:,1] - no1[:,1])/( sen_cos_comp_comp[:,2] ) #sine
    sen_cos_comp_comp[:,1] = (no2[:,0] - no1[:,0])/( sen_cos_comp_comp[:,2] ) #cosine
return sen_cos_comp_comp
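# --- Illustrative sanity check (added, not part of the original script) ------
# For a horizontal 3-node element of length 100 cm the direction data should be
# sin = 0, cos = 1, L = 100, and the element stiffness matrix should be
# symmetric.  The section properties below are arbitrary round numbers used
# only for this check.
def _check_element_geometry_and_stiffness():
    import numpy as np
    nodes = np.array([[0., 0.], [50., 0.], [100., 0.]])
    connectivity = np.array([[0, 1, 2]])
    scl_check = angulos_comprimentos(nodes, connectivity)
    ke_check = rigidez_portico(20000., 100., 10000., scl_check[0])  # E [kN/cm2], A [cm2], I [cm4]
    assert np.allclose(scl_check[0], [0., 1., 100.])
    assert np.allclose(ke_check, ke_check.T)
    return scl_check, ke_check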
GL = np.arange(0, 21).reshape(7,3)
#IE = np.array([[9, 7, 0],[0, 1, 2],[2, 3, 4],[4, 5, 6], [10, 8, 6]])
IE = np.array([ [9, 0, 1], [1, 2, 3], [3, 4, 5], [5, 6, 7], [10, 8, 7] ])
#nos = np.array([ [-470., 470.], [-410., 470.], [-350., 470.], [-150., 470.], [50., 470.], [260., 470.], [470., 470.],
# [-470., 235.], [470., 235.], [-470., 0.], [470, 0] ])
nos = np.array([ [0, 235], [0, 470], [60, 470], [120, 470], [320, 470], [520, 470], [730, 470], [940, 470], [940, 235], [0,0], [940, 0] ])
scL = angulos_comprimentos(nos, IE)
d = 20. #cm
t_w = 1.25 #cm
b_f = 40. #cm
t_f = 1.25 #cm
h = d - 2 * t_f
I_z = b_f*d**3/12 - (b_f-2*t_w)*h**3/12 #cm4
Ar = d*b_f - h*(b_f-2*t_w) #cm2
#matriz de rigidez dos elementos
Ke = []
for e in range(IE.shape[0]):
Ke.append(rigidez_portico(20000., Ar, I_z, scL[e])) #kN/cm2, cm2 e cm4
ID = []
for e in IE:
ID.append( [3*e[0], 3*e[0]+1, 3*e[0]+2, 3*e[1], 3*e[1]+1, 3*e[1]+2, 3*e[2], 3*e[2]+1, 3*e[2]+2] )
K = np.zeros((nos.shape[0]*3, nos.shape[0]*3))
for e in range(IE.shape[0]):
for i in range(9):
for j in range(9):
K[ ID[e][i], ID[e][j] ] += Ke[e][i, j]
dof = nos.shape[0]*3 - 6
Ku = K[:dof, :dof]
Kr = K[dof:, :dof]
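#Note (added comment): with the free degrees of freedom numbered first, the global
#system is partitioned into Ku (free-free block) and Kr (restrained-free block);
#Ku is used below to solve for the unknown displacements and Kr to recover the
#support reactions.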
##computation of the equivalent nodal forces !!!ERROR HERE!!! ---------------------------------------------------------------------
#for the beam element
r = sp.Symbol('r')
s = sp.Symbol('s')
l = sp.Symbol('l')
x1 = -l/2
x2 = 0
x3 = l/2
u1 = sp.Symbol('u1')
u2 = sp.Symbol('u2')
u3 = sp.Symbol('u3')
u4 = sp.Symbol('u4')
u5 = sp.Symbol('u5')
u6 = sp.Symbol('u6')
Mat_Coef = sp.Matrix([[1, -l/2, l**2/4, -l**3/8, l**4/16, -l**5/32],
[0, 1, -l, 3*l**2/4, -l**3/2, 5*l**4/16],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[1, l/2, l**2/4, l**3/8, l**4/16, l**5/32],
[0, 1, l, 3*l**2/4, l**3/2, 5*l**4/16]])
U = sp.Matrix([u1, u2, u3, u4, u5, u6])
Coefs = Mat_Coef.inv() * U
A = Coefs[0]
B = Coefs[1]
C = Coefs[2]
D = Coefs[3]
E = Coefs[4]
F = Coefs[5]
Ns = sp.expand(A + B*r + C*r**2 + D*r**3 + E*r**4 + F*r**5)
N1 = sp.Add(*[argi for argi in Ns.args if argi.has(u1)]).subs(u1, 1)
N2 = sp.Add(*[argi for argi in Ns.args if argi.has(u2)]).subs(u2, 1)
N3 = sp.Add(*[argi for argi in Ns.args if argi.has(u3)]).subs(u3, 1)
N4 = sp.Add(*[argi for argi in Ns.args if argi.has(u4)]).subs(u4, 1)
N5 = sp.Add(*[argi for argi in Ns.args if argi.has(u5)]).subs(u5, 1)
N6 = sp.Add(*[argi for argi in Ns.args if argi.has(u6)]).subs(u6, 1)
Nn = sp.Matrix([N1, N2, N3, N4, N5, N6])
#Computation of the distributed load on the upper beam - SLAB
Lvs = scL[2,2] #cm
g = 300./400 * 9.81/1000 #sp.Symbol('g') #em kN
#g = 0.01 #kN/cm
Nnn = Nn.subs({l: Lvs})
Feg = -g * sp.integrate( Nnn, (r, -Lvs/2, Lvs/2) )
##determinação da força não-linear estimativa do vento analítica
##com a origem no centro do elemento
#xA = -235
#xB = 235
#lv = xB - xA
#vi = 0.0046587 * (r + sp.Rational(lv, 2) )**sp.Rational(1, 5)
#Nvi = sp.expand(sp.Matrix([N1.subs({l: lv}), N2.subs({l: lv}), N3.subs({l: lv}), N4.subs({l: lv}), N5.subs({l: lv}), N6.subs({l: lv})]) * vi)
#Fevi = sp.integrate(Nvi, (r, xA, xB)).evalf()
#resultado de acima
Fevi = -np.array([ 1.15548506063797, 43.1624305176797, 3.43185982081697, 26.0157115449028, 1.65863999243194, -53.9826014556733])
#Fevi = np.zeros(6)
#Computation of the distributed load on the upper beam !!!ERROR HERE!!!----------------------------------------------------------------
Lvs1 = scL[0,2] #cm
Lvs2 = scL[1,2] #cm
Lvs3 = scL[2,2] #cm
Lvs4 = scL[3,2] #cm
q = 0.02 #kN/cm
#q = 0.01 #kN/cm
Nnn = Nn.subs({l: Lvs})
Feq2 = -q * sp.integrate( Nn.subs({l: Lvs2}), (r, -Lvs2/2, Lvs2/2) )
##Feq2 = np.zeros(6)
Feq3 = -q * sp.integrate( Nn.subs({l: Lvs3}), (r, -Lvs3/2, Lvs3/2) )
Feq4 = -q * sp.integrate( Nn.subs({l: Lvs4}), (r, -Lvs4/2, Lvs4/2) )
#Fevi = -q * sp.integrate( Nn.subs({l: Lvs1}), (r, -Lvs1/2, Lvs1/2) )
##Feq4 = np.zeros(6)
##cargas distribuídas constantes
#def cdcP(s, c, L, gx, gy):
# '''
# Cálculo das forças nodais equivalentes a uma carga distribuída constante ao elemento de pórtico.
# '''
# return np.array([ L*c*(c*gx + gy*s)/6 - 7*L*s*(c*gy - gx*s)/30,
# 7*L*c*(c*gy - gx*s)/30 + L*s*(c*gx + gy*s)/6,
# L**2*(c*gy - gx*s)/60,
# 2*L*c*(c*gx + gy*s)/3 - 8*L*s*(c*gy - gx*s)/15,
# 8*L*c*(c*gy - gx*s)/15 + 2*L*s*(c*gx + gy*s)/3,
# 0,
# L*c*(c*gx + gy*s)/6 - 7*L*s*(c*gy - gx*s)/30,
# 7*L*c*(c*gy - gx*s)/30 + L*s*(c*gx + gy*s)/6,
# -L**2*(c*gy - gx*s)/60])
#
#Feq1 = cdcP(scL[0,0], scL[0,1], scL[0,2], 0.01, 0.)
Feq1 = np.array([0, Fevi[0], Fevi[1], 0, Fevi[2], Fevi[3], 0, Fevi[4], Fevi[5]], dtype=float)
#Feg = cdcP(scL[2,0], scL[2,1], scL[2,2], 0., -0.01)
Feg = np.array([0, Feg[0], Feg[1], 0, Feg[2], Feg[3], 0, Feg[4], Feg[5]], dtype=float)
#Feq2 = cdcP(scL[1,0], scL[1,1], scL[1,2], 0., -0.01)
Feq2 = np.array([0, Feq2[0], Feq2[1], 0, Feq2[2], Feq2[3], 0, Feq2[4], Feq2[5]], dtype=float)
#Feq3 = cdcP(scL[2,0], scL[2,1], scL[2,2], 0., -0.01)
Feq3 = np.array([0, Feq3[0], Feq3[1], 0, Feq3[2], Feq3[3], 0, Feq3[4], Feq3[5]], dtype=float)
#Feq4 = cdcP(scL[3,0], scL[3,1], scL[3,2], 0., -0.01)
Feq4 = np.array([0, Feq4[0], Feq4[1], 0, Feq4[2], Feq4[3], 0, Feq4[4], Feq4[5]], dtype=float)
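#Note (added comment): the 6-component beam load vectors computed above are scattered
#into the 9-DOF frame element vector as [0, F0, F1, 0, F2, F3, 0, F4, F5], i.e. the
#transverse distributed loads contribute nothing to the axial degrees of freedom.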
#Assembly of the remaining load vectors as frame elements (element 1 already rotated) !!!ERROR HERE!!!---------------------------------------------------------------------------------------
Fe = []
RFv = np.array([[0, -1, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[0 , 0 ,0 ,0, -1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, -1, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1]])
Fe.append(np.matmul( RFv, Feq1 ))
#Fe.append(Feq1)
Fe.append(Feq2)
Fe.append(Feq3 + Feg)
Fe.append(Feq4)
Fe.append(np.zeros(9))
#Assembly of the equivalent nodal load vector used to compute the displacements
Ft = np.zeros(nos.shape[0]*3)
for e in range(IE.shape[0]):
for i in range(9):
Ft[ ID[e][i] ] += Fe[e][i]
FU = Ft[:dof]
FR = Ft[dof:]
#solution for the nodal displacements
Un = np.linalg.solve(Ku, FU)
R = np.matmul(Kr, Un) - FR
U = np.zeros(nos.shape[0]*3)
U[:dof] = Un
#reescrevendo os deslocamentos no sistema local do elemento
u = []
for e in range(IE.shape[0]):
ugs = np.zeros(9)
for i in range(9):
ugs[i] = U[ ID[e][i] ]
u.append(ugs)
R13 = np.array([[ 0, 1, 0, 0, 0, 0, 0, 0, 0],
[-1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 0, 1, 0, 0, 0, 0, 0, 0],
[ 0 ,0 ,0 , 0, 1, 0, 0, 0, 0],
[ 0, 0, 0, -1, 0, 0, 0, 0, 0],
[ 0, 0, 0, 0, 0, 1, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 1, 0],
[ 0, 0, 0, 0, 0, 0, -1, 0, 0],
[ 0, 0, 0, 0, 0, 0, 0, 0, 1]])
u[0] = np.dot(R13, u[0])
u[-1] = np.dot(R13, u[-1])
#computation of strains, stresses, bending moment, shear and axial force in each element in the local axis ------------------------------------------------------------
def esP(U, L, E, A, h, I, pontos=100):
x = np.linspace(-L/2, L/2, pontos)
deslocamentos = U[0]*(-x/L + 2*x**2/L**2) + U[1]*(4*x**2/L**2 - 10*x**3/L**3 - 8*x**4/L**4 + 24*x**5/L**5) + U[2]*(x**2/(2*L) - x**3/L**2 - 2*x**4/L**3 + 4*x**5/L**4) + U[3]*(1 - 4*x**2/L**2) + U[4]*(1 - 8*x**2/L**2 + 16*x**4/L**4) + U[5]*(x - 8*x**3/L**2 + 16*x**5/L**4) + U[6]*(x/L + 2*x**2/L**2) + U[7]*(4*x**2/L**2 + 10*x**3/L**3 - 8*x**4/L**4 - 24*x**5/L**5) + U[8]*(-x**2/(2*L) - x**3/L**2 + 2*x**4/L**3 + 4*x**5/L**4)
rotacoes = U[0]*(-1/L + 4*x/L**2) + U[1]*(8/L**2 - 60*x/L**3 - 96*x**2/L**4 + 480*x**3/L**5) + U[2]*(1/L - 6*x/L**2 - 24*x**2/L**3 + 80*x**3/L**4) + U[4]*(-16/L**2 + 192*x**2/L**4) + U[5]*(-48*x/L**2 + 320*x**3/L**4) + U[6]*(1/L + 4*x/L**2) + U[7]*(8/L**2 + 60*x/L**3 - 96*x**2/L**4 - 480*x**3/L**5) + U[8]*(-1/L - 6*x/L**2 + 24*x**2/L**3 + 80*x**3/L**4) - U[3]*8*x/L**2
#deformacoes = U[0]*(-1/L + 4*x/L**2) + U[1]*(8/L**2 - 60*x/L**3 - 96*x**2/L**4 + 480*x**3/L**5) + U[2]*(1/L - 6*x/L**2 - 24*x**2/L**3 + 80*x**3/L**4) + U[4]*(-16/L**2 + 192*x**2/L**4) + U[5]*(-48*x/L**2 + 320*x**3/L**4) + U[6]*(1/L + 4*x/L**2) + U[7]*(8/L**2 + 60*x/L**3 - 96*x**2/L**4 - 480*x**3/L**5) + U[8]*(-1/L - 6*x/L**2 + 24*x**2/L**3 + 80*x**3/L**4) - U[3]*8*x/L**2
#tensoes = E * deformacoes
momento = - (E * I) * ( U[1]*(8/L**2 - 60*x/L**3 - 96*x**2/L**4 + 480*x**3/L**5) + U[2]*(1/L - 6*x/L**2 - 24*x**2/L**3 + 80*x**3/L**4) + U[4]*(-16/L**2 + 192*x**2/L**4) + U[5]*(-48*x/L**2 + 320*x**3/L**4) + U[7]*(8/L**2 + 60*x/L**3 - 96*x**2/L**4 - 480*x**3/L**5) + U[8]*(-1/L - 6*x/L**2 + 24*x**2/L**3 + 80*x**3/L**4) )
cortante = (E * I) * ( U[1]*(-60/L**3 - 192*x/L**4 + 1440*x**2/L**5) + U[2]*(-6/L**2 - 48*x/L**3 + 240*x**2/L**4) + U[5]*(-48/L**2 + 960*x**2/L**4) + U[7]*(60/L**3 - 192*x/L**4 - 1440*x**2/L**5) + U[8]*(-6/L**2 + 48*x/L**3 + 240*x**2/L**4) + U[4]*384*x/L**4 )
normal = (E * A) * ( U[0]*(-1/L + 4*x/L**2) + U[6]*(1/L + 4*x/L**2) - 8*U[3]*x/L**2 )
#aborgadem reversa
tensoes = normal/A + momento/I * h/2
deformacoes = tensoes/E
return deslocamentos, rotacoes, deformacoes, tensoes, momento, cortante, normal, x
E = 20000. #kN/cm2
deslocamentos1, rotacoes1, deformacoes1, tensoes1, momentos1, corte1, normal1, varElem1 = esP(u[0], scL[0, 2], E, Ar, d, I_z)
deslocamentos2, rotacoes2, deformacoes2, tensoes2, momentos2, corte2, normal2, varElem2 = esP(u[1], scL[1, 2], E, Ar, d, I_z)
deslocamentos3, rotacoes3, deformacoes3, tensoes3, momentos3, corte3, normal3, varElem3 = esP(u[2], scL[2, 2], E, Ar, d, I_z)
deslocamentos4, rotacoes4, deformacoes4, tensoes4, momentos4, corte4, normal4, varElem4 = esP(u[3], scL[3, 2], E, Ar, d, I_z)
deslocamentos5, rotacoes5, deformacoes5, tensoes5, momentos5, corte5, normal5, varElem5 = esP(u[4], scL[4, 2], E, Ar, d, I_z)
#matrix of the derivatives of the frame interpolation functions
Bv = -s * sp.diff( sp.diff(Nn, r), r)
Bv2 = sp.diff( sp.diff(Nn, r), r)
Bp = sp.Matrix([[-1/l + 4*r/l**2, 0., 0., -8*r/l**2, 0., 0., 1/l + 4*r/l**2, 0., 0.],
[0., Bv[0], Bv[1], 0., Bv[2], Bv[3], 0., Bv[4], Bv[5] ]])
Bp2 = sp.Matrix([0., Bv2[0], Bv2[1], 0., Bv2[2], Bv2[3], 0., Bv2[4], Bv2[5] ])
#deformações nos elementos
epsilon = []
for e in range(IE.shape[0]):
epsilon.append( Bp.subs({l: scL[e,2]}) * u[e][:, np.newaxis] )
#tensões nos elementos
E = 20000. #kN/cm2
sigma = []
for e in range(IE.shape[0]):
sigma.append( E*epsilon[e] )
#esforços normais
Ap = 143.75 #cm2
N = []
for e in range(IE.shape[0]):
N.append( Ap * sigma[e][0] )
#momentos fletores nas barras
M = []
for e in range(IE.shape[0]):
M.append( 2 * t_w * sp.integrate( s * sigma[e][1], (s, -h/2, h/2 ) ) + 2 * b_f * sp.integrate( s * sigma[e][1], (s, h/2, d/2 ) ) )
#esforço cortante
V = []
for e in range(IE.shape[0]):
V.append( sp.diff(M[e], r) )
#plots of the displacements, axial forces, bending moments and shear forces ---------------------------------------------------------------------------------------------
#shape functions of the truss and beam elements
Nt = sp.Matrix([-r/l + 2*r**2/l**2, 1 - 4*r**2/l**2, r/l + 2*r**2/l**2])
Np = sp.Matrix([Nn[0], Nn[1], Nn[2], Nn[3], Nn[4], Nn[5]])
Ymin = np.min(nos[:,1])
Ymax = np.max(nos[:,1])
Xmin = np.min(nos[:,0])
Xmax = np.max(nos[:,0])
Y = np.linspace(-235, 235, 100)
X1 = np.linspace(-60, 60, 100)
X2 = np.linspace(-200, 200, 100)
X3 = np.linspace(-210, 210, 100)
##axial force diagram
#escala_n = 20
#plt.plot([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], color="gray") #elementos
#plt.scatter([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], s=15, color="gray") #nós
#plt.plot(-np.ones(100)*N[0]*escala_n - 470, Y)
#plt.plot(np.linspace(-470, 470, 100), np.ones(100)*N[1].subs({r:0})*escala_n + 235)
#plt.plot(-np.ones(100)*N[2].subs({r:0})*escala_n + 470, Y)
#plt.show()
#shear force diagram
V1f = sp.utilities.lambdify([r], V[0], "numpy")
V2f = sp.utilities.lambdify([r], V[1], "numpy")
V3f = sp.utilities.lambdify([r], V[2], "numpy")
V4f = sp.utilities.lambdify([r], V[3], "numpy")
V5f = sp.utilities.lambdify([r], V[4], "numpy")
escala_v = 20
plt.plot([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], color="gray") #elementos
plt.scatter([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], s=15, color="gray") #nós
plt.plot(V1f(Y)*escala_v - 470, Y)
plt.plot(X1 - 470 + 60, -V2f(X1)*escala_v + 235)
plt.plot(X2 - 350 + 200, -V3f(X2)*escala_v + 235)
plt.plot(X3 + 50 + 210, -V4f(X3)*escala_v + 235)
plt.plot(V5f(Y)*escala_v + 470, Y)
plt.show()
###bending moment diagram
M1f = sp.utilities.lambdify([r], M[0], "numpy")
M2f = sp.utilities.lambdify([r], M[1], "numpy")
M3f = sp.utilities.lambdify([r], M[2], "numpy")
M4f = sp.utilities.lambdify([r], M[3], "numpy")
M5f = sp.utilities.lambdify([r], M[4], "numpy")
escala_v = 0.1
plt.plot([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], color="gray") #elementos
plt.scatter([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], s=15, color="gray") #nós
plt.plot(-M1f(Y)*escala_v -470, Y)
plt.plot(X1 - 470 + 60, M2f(X1)*escala_v + 235)
plt.plot(X2 - 350 + 200, M3f(X2)*escala_v + 235)
plt.plot(X3 + 50 + 210, M4f(X3)*escala_v + 235)
plt.plot(-M5f(Y)*escala_v +470, Y)
plt.show()
##using the shape functions directly ----------------------------------------------------------------------------------
#escala_v = 20.
#plt.plot([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], color="gray") #elementos
#plt.scatter([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], s=15, color="gray") #nós
#plt.plot(-normal1*escala_v - 470, varElem1)
#plt.plot(varElem2 + 60 - 470, normal2*escala_v + 235)
#plt.plot(varElem3 + 320 - 470, normal3*escala_v + 235)
#plt.plot(varElem4 + 730 - 470, normal4*escala_v + 235)
#plt.plot(-normal5*escala_v + 470, varElem5)
#plt.show()
#
#escala_v = 20.
#plt.plot([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], color="gray") #elementos
#plt.scatter([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], s=15, color="gray") #nós
#plt.plot(-corte1*escala_v - 470, varElem1)
#plt.plot(varElem2 + 60 - 470, corte2*escala_v + 235)
#plt.plot(varElem3 + 320 - 470, corte3*escala_v + 235)
#plt.plot(varElem4 + 730 - 470, corte4*escala_v + 235)
#plt.plot(-corte5*escala_v + 470, varElem5)
#plt.show()
#
#escala_v = 0.1
#plt.plot([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], color="gray") #elementos
#plt.scatter([-scL[0,2], -scL[0,2], scL[0,2], scL[0,2]], [-235, 235, 235, -235], s=15, color="gray") #nós
#plt.plot(-momentos1*escala_v - 470, varElem1)
#plt.plot(varElem2 + 60 - 470, momentos2*escala_v + 235)
#plt.plot(varElem3 + 320 - 470, momentos3*escala_v + 235)
#plt.plot(varElem4 + 730 - 470, momentos4*escala_v + 235)
#plt.plot(-momentos5*escala_v + 470, varElem5)
#plt.show() | mit |
JohnReid/auxiliary-deep-generative-models | adgm/training/base.py | 1 | 4029 | import logging
import sys
import os
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
plt.ioff()
from ..utils import env_paths as paths
import seaborn as sns
import numpy as np
import cPickle as pkl
from logHandler import Logger
logger = Logger.getChildLogger()
class Train(object):
"""
    The :class:`Train` class provides general training functions.
It should be subclassed when implementing new types of training loops.
"""
def __init__(self, model, pickle_f_custom_freq=None, custom_eval_func=None):
"""
Initialisation of the basic architecture and programmatic settings of any training procedure.
This method should be called from any subsequent inheriting training procedure.
:param model: The model to train on.
:param pickle_f_custom_freq: The number of epochs between each serialization, plotting etc.
:param custom_eval_func: The custom evaluation function taking (model, output_path) as arguments.
"""
self.model = model
self.x_dist = None
self.custom_eval_func = custom_eval_func
self.eval_train = {}
self.eval_test = {}
self.eval_validation = {}
self.pickle_f_custom_freq = pickle_f_custom_freq
def train_model(self, *args):
"""
        Perform the actual training of the model; subclasses must implement this.
        :param args: Training arguments, typically the number of epochs to train for.
"""
raise NotImplementedError
def dump_dicts(self):
"""
Dump the model evaluation dictionaries
"""
p_train = paths.get_plot_evaluation_path_for_model(self.model.get_root_path(), "train_dict.pkl")
pkl.dump(self.eval_train, open(p_train, "wb"))
p_test = paths.get_plot_evaluation_path_for_model(self.model.get_root_path(), "test_dict.pkl")
pkl.dump(self.eval_test, open(p_test, "wb"))
p_val = paths.get_plot_evaluation_path_for_model(self.model.get_root_path(), "validation_dict.pkl")
pkl.dump(self.eval_validation, open(p_val, "wb"))
def plot_eval(self, eval_dict, labels, path_extension=""):
"""
Plot the loss function in a overall plot and a zoomed plot.
:param path_extension: If the plot should be saved in an incremental way.
"""
def plot(x, y, fit, label):
sns.regplot(np.array(x), np.array(y), fit_reg=fit, label=label, scatter_kws={"s": 5})
plt.clf()
plt.subplot(211)
idx = np.array(eval_dict.values()[0]).shape[0]
x = np.array(eval_dict.values())
for i in range(idx):
plot(eval_dict.keys(), x[:, i], False, labels[i])
plt.legend()
plt.subplot(212)
for i in range(idx):
plot(eval_dict.keys()[-int(len(x) * 0.25):], x[-int(len(x) * 0.25):][:, i], True, labels[i])
plt.xlabel('Epochs')
plt.savefig(paths.get_plot_evaluation_path_for_model(self.model.get_root_path(), path_extension+".png"))
def write_to_logger(self, s):
"""
Write a string to the logger and the console.
:param s: A string with the text to print.
"""
logger.info(s)
def add_initial_training_notes(self, s):
"""
Add an initial text for the model as a personal
note, to keep an order of experimental testing.
:param s: A string with the text to print.
"""
if len(s) == 0:
return
line_length = 10
self.write_to_logger("### INITIAL TRAINING NOTES ###")
w_lst = s.split(" ")
new_s = ""
for i in range(len(w_lst)):
if (not i == 0) and (i % line_length == 0):
new_s += "\n"
new_s += " " + w_lst[i]
self.write_to_logger(new_s)
#
# Assign the csv files to which learning curves and test AUPRC values are written
#
def set_csv_files(self, csv_file_dict):
self.learning_csv = csv_file_dict['learningCurves']
self.testeval_csv = csv_file_dict['testAUPRCs']
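# --- Hedged sketch (added, not part of the original project) -----------------
# Minimal subclass showing the intended extension point of Train.  The metric
# values below are placeholders; a real training loop would evaluate the model
# and feed its own measurements to plot_eval/dump_dicts.
class _DummyTrain(Train):
    def train_model(self, n_epochs):
        for epoch in range(n_epochs):
            # Placeholder standing in for per-epoch evaluation results.
            self.eval_train[epoch] = [0.0]
            self.write_to_logger("epoch %d done" % epoch)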
| mit |
openfisca/combine-calculators | scripts/reformators.py | 1 | 20476 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cma
from sklearn import tree
import numpy as np
import json
import subprocess
import os
import random
import math
from enum import Enum
def sign(number):
    """Return 1 for positive numbers, -1 for negative numbers and 0 for 0."""
    try:
        return number / abs(number)
    except ZeroDivisionError:
        return 0
class EchantillonNotDefinedException(Exception):
pass
class Excalibur():
"""Excalibur is a powerful tool to model, simplify and reform a legislation.
    It takes as input a population with data (e.g., salaries/taxes/subsidies).
It provides two functionalities:
1) factorisation of existing legislation based on an economist's goals
2) efficiency to write reforms and evaluate them instantly
    The population is provided externally, e.g. loaded from OpenFisca results and enriched through ``add_concept``.
"""
def __init__(self, target_variable, taxable_variable, price_of_no_regression=0):
self._target_variable = target_variable
self._taxable_variable = taxable_variable
self._max_cost = 0
self._population = None
self._price_of_no_regression = price_of_no_regression
def filter_only_likely_population(self):
"""
Removes unlikely elements in the population
TODO: This should be done by the population generator
:param raw_population:
:return: raw_population without unlikely cases
"""
new_raw_population = []
for case in self._raw_population:
            if (int(case['0DA']) <= 1950 or ('0DB' in case and int(case['0DB']) <= 1950)) and 'F' in case and int(case['F']) > 0:
pass
else:
new_raw_population.append(case)
self._raw_population = new_raw_population
def filter_only_no_revenu(self):
"""
Removes people who have a salary from the population
:param raw_population:
:return: raw_population without null salary
"""
new_raw_population = []
for case in self._raw_population:
if case.get('1AJ', 0) < 1 and case.get('1BJ', 0) < 1:
new_raw_population.append(case)
self._raw_population = new_raw_population
def is_optimized_variable(self, var):
return var != self._taxable_variable and var != self._target_variable
def init_parameters(self, parameters, tax_rate_parameters=[], tax_threshold_parameters=[]):
print repr(parameters)
var_total = {}
var_occurences = {}
self._index_to_variable = []
self._all_coefs = []
self._var_to_index = {}
self._var_tax_rate_to_index = {}
self._var_tax_threshold_to_index = {}
self._tax_rate_parameters = []
self._tax_threshold_parameters = []
self._parameters = set(parameters)
index = 0
for person in self._population:
for var in parameters:
if var in person:
if var not in self._var_to_index:
self._index_to_variable.append(var)
var_total[var] = 0
var_occurences[var] = 0
self._var_to_index[var] = index
index += 1
var_total[var] = var_total.get(var, 0) + person[var]
var_occurences[var] = var_occurences.get(var, 0) + 1
for var in self._index_to_variable:
self._all_coefs.append(var_total[var] / var_occurences[var])
for var in tax_rate_parameters:
self._all_coefs.append(0)
self._var_tax_rate_to_index[var] = index
self._tax_rate_parameters.append(var)
index += 1
for var in tax_threshold_parameters:
self._all_coefs.append(5000)
self._var_tax_threshold_to_index[var] = index
self._tax_threshold_parameters.append(var)
index += 1
def find_all_possible_inputs(self, input_variable):
possible_values = set()
for person in self._population:
if input_variable in person:
if person[input_variable] not in possible_values:
possible_values.add(person[input_variable])
return sorted(possible_values)
def find_min_values(self, input_variable, output_variable):
min_values = {}
for person in self._population:
if input_variable not in person:
continue
input = person[input_variable]
if person[output_variable] <= min_values.get(input, 100000):
min_values[input] = person[output_variable]
return min_values
def find_average_values(self, input_variable, output_variable):
values = {}
number_of_values = {}
for person in self._population:
if input_variable not in person:
continue
input = person[input_variable]
values[input] = values.get(input, 0) + person[output_variable]
number_of_values[input] = number_of_values.get(input, 0) + 1
for input in values:
values[input] = values[input] / number_of_values[input]
return values
def find_jumps_rec(self, init_jump_size, possible_inputs, values):
if init_jump_size > 10000:
return
jumps = []
for i in range(1, len(possible_inputs)):
if abs(values[possible_inputs[i]] - values[possible_inputs[i-1]]) > init_jump_size:
jumps.append(possible_inputs[i])
if len(jumps) > 0 and len(jumps) < 5:
return jumps
else:
return self.find_jumps_rec(init_jump_size * 1.1 , possible_inputs, values)
def find_jumps(self, input_variable, output_variable, jumpsize=10, maxjumps=5, method='min'):
"""
This function find jumps in the data
"""
possible_inputs = self.find_all_possible_inputs(input_variable)
# For binary values, jump detection is useless
if len(possible_inputs) < 3:
print 'No segmentation made on variable ' + input_variable + ' because it has less than 3 possible values'
return []
if method == 'min':
values = self.find_min_values(input_variable, output_variable)
elif method == 'average':
values = self.find_average_values(input_variable, output_variable)
else:
assert False, 'Method to find the average value is badly defined, it should be "min" or "average"'
jumps = self.find_jumps_rec(jumpsize, possible_inputs, values)
if len(jumps) <= maxjumps:
return jumps
else:
print 'No segmentation made on variable ' + input_variable + ' because it has more than ' \
+ str(maxjumps + 1) + ' segments'
return []
def add_segments_for_variable(self, variable):
jumps = self.find_jumps(variable, self._target_variable)
print 'Jumps for variable ' + variable + ' are ' + repr(jumps)
if len(jumps) == 0:
return []
segment_names = []
# First segment
segment_name = variable + ' < ' + str(jumps[0])
segment_names.append(segment_name)
for person in self._population:
if variable in person and person[variable] < jumps[0]:
person[segment_name] = 1
# middle segments
for i in range(1, len(jumps)):
if abs(jumps[i-1]-jumps[i]) > 1:
segment_name = str(jumps[i-1]) + ' <= ' + variable + ' < ' + str(jumps[i])
else:
segment_name = variable + ' is ' + str(jumps[i-1])
segment_names.append(segment_name)
for person in self._population:
if variable in person and person[variable] >= jumps[i-1] and person[variable] < jumps[i]:
person[segment_name] = 1
# end segment
segment_name = variable + ' >= ' + str(jumps[-1])
segment_names.append(segment_name)
for person in self._population:
if variable in person and person[variable] >= jumps[-1]:
person[segment_name] = 1
return segment_names
def add_segments(self, parameters, segmentation_parameters):
new_parameters = []
for variable in segmentation_parameters:
new_parameters = new_parameters + self.add_segments_for_variable(variable)
new_parameters = sorted(new_parameters)
return parameters + new_parameters
def simulated_target(self, person, coefs):
simulated_target = 0
threshold = 0
tax_rate = 0
for var in person:
if var in self._parameters:
idx = self._var_to_index[var]
# Adding linear constant
simulated_target += coefs[idx] * person[var]
if var in self._tax_threshold_parameters:
idx = self._var_tax_threshold_to_index[var]
# determining the threshold from which we pay the tax
threshold += coefs[idx] * person[var]
if var in self._tax_rate_parameters:
idx = self._var_tax_rate_to_index[var]
# determining the tax_rate, divided by 100 to help the algorithm converge faster
tax_rate += coefs[idx] * person[var] / 100
simulated_target += person[self._taxable_variable] - (person[self._taxable_variable] - threshold / 10) * tax_rate
return simulated_target
def compute_cost_error(self, simulated, person):
cost = simulated - person[self._target_variable]
error = abs(cost)
error_util = error / (person[self._target_variable] + 1)
if cost < 0:
pissed = 1
else:
pissed = 0
return cost, error, error_util, pissed
def objective_function(self, coefs):
error = 0
error2 = 0
total_cost = 0
pissed_off_people = 0
nb_people = len(self._population)
for person in self._population:
simulated = self.simulated_target(person, coefs)
this_cost, this_error, this_error_util, this_pissed = self.compute_cost_error(simulated, person)
total_cost += this_cost
error += this_error
error2 += this_error_util * this_error_util
pissed_off_people += this_pissed
percentage_pissed_off = float(pissed_off_people) / float(nb_people)
if random.random() > 0.98:
print 'Best: avg change per month: ' + repr(int(error / (12 * len(self._population))))\
+ ' cost: ' \
+ repr(int(self.normalize_on_population(total_cost) / 1000000))\
+ ' M/year and '\
+ repr(int(1000 * percentage_pissed_off)/10) + '% people with lower salary'
cost_of_overbudget = 100000
if self.normalize_on_population(total_cost) > self._max_cost:
error2 += pow(cost_of_overbudget, 2) * self.normalize_on_population(total_cost)
if -self.normalize_on_population(total_cost) < self._min_saving:
error2 += pow(cost_of_overbudget, 2) * self.normalize_on_population(total_cost)
return math.sqrt(error2)
def find_useful_parameters(self, results, threshold=100):
"""
Eliminate useless parameters
"""
new_parameters = []
optimal_values = []
for i in range(len(results)):
if results[i] >= threshold:
new_parameters.append(self._index_to_variable[i])
optimal_values.append(results[i])
else:
print 'Parameter ' + self._index_to_variable[i] + ' was dropped because it accounts to less than '\
+ str(threshold) + ' euros'
return new_parameters, optimal_values
def suggest_reform(self, parameters, max_cost=0, min_saving=0, verbose=False, tax_rate_parameters=[], tax_threshold_parameters=[], percent_not_pissed_off=0):
"""
Find parameters of a reform
:param parameters: variables that will be taken into account
:param max_cost: maximum cost of the reform in total, can be negative if we want savings
:param verbose:
:return: The reform for every element of the population
"""
self._percent_not_pissed_off = percent_not_pissed_off
self._max_cost = max_cost
self._min_saving = min_saving
if (self._max_cost != 0 or self._min_saving != 0) and self._echantillon is None:
raise EchantillonNotDefinedException()
if verbose:
cma.CMAOptions('verb')
self.init_parameters(parameters,
tax_rate_parameters=tax_rate_parameters,
tax_threshold_parameters=tax_threshold_parameters)
# new_parameters = self.add_segments(direct_parameters, barem_parameters)
# self.init_parameters(new_parameters)
res = cma.fmin(self.objective_function, self._all_coefs, 10000.0, options={'maxfevals': 5e3})
# print '\n\n\n Reform proposed: \n'
#
final_parameters = []
i = 0
while i < len(self._index_to_variable):
final_parameters.append({'variable': self._index_to_variable[i],
'value': res[0][i],
'type': 'base_revenu'})
i += 1
offset = len(self._index_to_variable)
while i < offset + len(self._tax_rate_parameters):
final_parameters.append({'variable': self._tax_rate_parameters[i-offset],
'value': res[0][i],
'type': 'tax_rate'})
i += 1
offset = len(self._index_to_variable) + len(self._tax_rate_parameters)
while i < offset + len(self._tax_threshold_parameters):
final_parameters.append({'variable': self._tax_threshold_parameters[i-offset],
'value': res[0][i],
'type': 'tax_threshold'})
i += 1
simulated_results, error, cost, pissed = self.apply_reform_on_population(self._population, coefficients=res[0])
return simulated_results, error, cost, final_parameters, pissed
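    # Note (added comment): a typical call sequence is load_openfisca_results(...)
    # to fill the raw population, one or more add_concept(...) calls to derive the
    # model inputs, summarize_population() to set the sampling weight, and then
    # suggest_reform(...), which returns (simulated_results, mean_error, cost,
    # final_parameters, share_of_people_losing_out).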
def population_to_input_vector(self, population):
output = []
for person in population:
person_output = self.person_to_input_vector(person)
output.append(person_output)
return output
def person_to_input_vector(self, person):
return list(person.get(var, 0) for var in self._index_to_variable)
def suggest_reform_tree(self,
parameters,
max_cost=0,
min_saving=0,
verbose=False,
max_depth=3,
image_file=None,
min_samples_leaf=2):
self._max_cost = max_cost
self._min_saving = min_saving
if (self._max_cost != 0 or self._min_saving != 0) and self._echantillon is None:
raise EchantillonNotDefinedException()
self.init_parameters(parameters)
X = self.population_to_input_vector(self._population)
y = map(lambda x: int(x[self._target_variable]), self._population)
clf = tree.DecisionTreeRegressor(max_depth=max_depth,
min_samples_leaf=min_samples_leaf)
clf = clf.fit(X, y)
simulated_results, error, cost, pissed = self.apply_reform_on_population(self._population, decision_tree=clf)
if image_file is not None:
with open( image_file + ".dot", 'w') as f:
f = tree.export_graphviz(clf,
out_file=f,
feature_names=self._index_to_variable,
filled=True,
impurity=True,
proportion=True,
rounded=True,
rotate=True
)
os.system('dot -Tpng ' + image_file + '.dot -o ' + image_file + '.png')
# 'dot -Tpng enfants_age.dot -o enfants_age.png')
# ')
# dot_data = tree.export_graphviz(clf)
#
# graph = pydotplus.graph_from_dot_data(dot_data)
# graph.write_pdf("new_law.pdf")
#
# dot_data = tree.export_graphviz(clf, out_file=None,
# feature_names=self._index_to_variable,
# filled=True, rounded=True,
# special_characters=True)
# graph = pydotplus.graph_from_dot_data(dot_data)
# Image(graph.create_png())
return simulated_results, error, cost, clf
def is_boolean(self, variable):
"""
Defines if a variable only has boolean values
:param variable: The name of the variable of interest
:return: True if all values are 0 or 1, False otherwise
"""
for person in self._population:
if variable in person and person[variable] not in [0, 1]:
return False
return True
def apply_reform_on_population(self, population, coefficients=None, decision_tree=None):
"""
Computes the reform for all the population
:param population:
:param coefficients:
:return:
"""
simulated_results = []
total_error = 0
total_cost = 0
pissed = 0
for i in range(0, len(population)):
if decision_tree:
simulated_result = float(decision_tree.predict(self.person_to_input_vector(population[i]))[0])
elif coefficients is not None:
simulated_result = self.simulated_target(population[i], coefficients)
simulated_results.append(simulated_result)
this_cost, this_error, this_error_util, this_pissed = self.compute_cost_error(simulated_result, population[i])
total_cost += this_cost
total_error += this_error
pissed += this_pissed
total_cost = self.normalize_on_population(total_cost)
return simulated_results, total_error / len(population), total_cost, pissed / float(len(population))
def add_concept(self, concept, function):
if self._population is None:
self._population = list(map(lambda x: {self._target_variable: x[self._target_variable]}, self._raw_population))
for i in range(len(self._raw_population)):
result = function(self._raw_population[i])
if result is not None and result is not False:
self._population[i][concept] = float(result)
def normalize_on_population(self, cost):
if self._echantillon is None or self._echantillon == 0:
raise EchantillonNotDefinedException()
return cost / self._echantillon
def summarize_population(self):
total_people = 0
for family in self._raw_population:
total_people += 1
if '0DB' in family and family['0DB'] == 1:
total_people += 1
if 'F' in family:
total_people += family['F']
# We assume that there are 2000000 people with RSA
# TODO Put that where it belongs in the constructor
self._echantillon = float(total_people) / 30000000
print 'Echantillon of ' + repr(total_people) + ' people, in percent of french population for similar revenu: ' + repr(100 * self._echantillon) + '%'
def load_from_json(self, filename):
with open('../results/' + filename, 'r') as f:
return json.load(f)
def load_openfisca_results(self, filename):
results_openfisca = self.load_from_json(filename + '-openfisca.json')
testcases = self.load_from_json(filename + '-testcases.json')
self._raw_population = testcases[:]
for i in range(len(testcases)):
self._raw_population[i][self._target_variable] = results_openfisca[i][self._target_variable] | gpl-3.0 |
ivano666/tensorflow | tensorflow/examples/skflow/mnist.py | 5 | 3061 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This example showcases how simple it is to build image classification networks.
It follows description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
### Download and load MNIST data.
mnist = learn.datasets.load_dataset('mnist')
### Linear classifier.
classifier = learn.TensorFlowLinearClassifier(
n_classes=10, batch_size=100, steps=1000, learning_rate=0.01)
classifier.fit(mnist.train.images, mnist.train.labels)
score = metrics.accuracy_score(mnist.test.labels, classifier.predict(mnist.test.images))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
def conv_model(X, y):
# reshape X to 4d tensor with 2nd and 3rd dimensions being image width and height
# final dimension being the number of color channels
X = tf.reshape(X, [-1, 28, 28, 1])
# first conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = learn.ops.conv2d(X, n_filters=32, filter_shape=[5, 5],
bias=True, activation=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# second conv layer will compute 64 features for each 5x5 patch
with tf.variable_scope('conv_layer2'):
h_conv2 = learn.ops.conv2d(h_pool1, n_filters=64, filter_shape=[5, 5],
bias=True, activation=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# densely connected layer with 1024 neurons
h_fc1 = learn.ops.dnn(h_pool2_flat, [1024], activation=tf.nn.relu, dropout=0.5)
return learn.models.logistic_regression(h_fc1, y)
# Training and predicting
classifier = learn.TensorFlowEstimator(
model_fn=conv_model, n_classes=10, batch_size=100, steps=20000,
learning_rate=0.001)
classifier.fit(mnist.train.images, mnist.train.labels)
score = metrics.accuracy_score(mnist.test.labels, classifier.predict(mnist.test.images))
print('Accuracy: {0:f}'.format(score))
| apache-2.0 |
abhitopia/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/in_memory_source_test.py | 62 | 3960 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NumpySource and PandasSource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class NumpySourceTestCase(test.TestCase):
def testNumpySource(self):
batch_size = 3
iterations = 1000
array = np.arange(32).reshape([16, 2])
numpy_source = in_memory_source.NumpySource(array, batch_size=batch_size)
index_column = numpy_source().index
value_column = numpy_source().value
cache = {}
with ops.Graph().as_default():
value_tensor = value_column.build(cache)
index_tensor = index_column.build(cache)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
expected_index = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_value = get_rows(array, expected_index)
actual_index, actual_value = sess.run([index_tensor, value_tensor])
np.testing.assert_array_equal(expected_index, actual_index)
np.testing.assert_array_equal(expected_value, actual_value)
coord.request_stop()
coord.join(threads)
class PandasSourceTestCase(test.TestCase):
def testPandasFeeding(self):
if not HAS_PANDAS:
return
batch_size = 3
iterations = 1000
index = np.arange(100, 132)
a = np.arange(32)
b = np.arange(32, 64)
dataframe = pd.DataFrame({"a": a, "b": b}, index=index)
pandas_source = in_memory_source.PandasSource(
dataframe, batch_size=batch_size)
pandas_columns = pandas_source()
cache = {}
with ops.Graph().as_default():
pandas_tensors = [col.build(cache) for col in pandas_columns]
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
indices = [
j % dataframe.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = dataframe.index[indices]
expected_rows = dataframe.iloc[indices]
actual_value = sess.run(pandas_tensors)
np.testing.assert_array_equal(expected_df_indices, actual_value[0])
for col_num, col in enumerate(dataframe.columns):
np.testing.assert_array_equal(expected_rows[col].values,
actual_value[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
TobiasLundby/UAST | Module1/exercise43/exercise4_3.py | 1 | 7703 | import fileinput
import sys
import matplotlib.pyplot as plt # For plotting, install as: sudo apt-get install python-matplotlib
from exportkml import kmlclass
class NMEA_data_parser:
### Class variables ###
# GGA field indices based on http://www.gpsinformation.org/dale/nmea.htm#GGA
message_type = 0
GGA_fix_time = 1
GGA_lat = 2
GGA_NS = 3
GGA_lon = 4
GGA_EW = 5
GGA_fix_quality = 6
GGA_num_sat = 7
GGA_horiz_delut = 8
GGA_alt = 9
GGA_alt_unit = 10
GGA_height_geoid = 11
GGA_height_unit = 12
GGA_time_since_update = 13
GGA_DPGS_ID = 14
GGA_checksum = 15
# GSA field indices
GSA_selection = 1
GSA_fix_type = 2
GSA_PRN1 = 3
# 4-14 are PRN2-PRN12
GSA_PDOP = 15
GSA_HDOP = 16
GSA_VDOP = 17
GSA_checksum = 18
### Constructors ###
def __init__(self):
return
def __init__(self,filename):
# Create lists for needed information
self.filename = filename
self.time = []
self.altitude = []
self.satellites_tracked = []
self.latitude = []
self.longtitude = []
self.GNSS_quality = []
self.PDOP = []
self.HDOP = []
self.VDOP = []
return
### Methods ###
def parse(self):
print 'Parsing file:',self.filename
for line in fileinput.input(self.filename): # Parse all lines
line_elements = line.split(",") # Split in elements
if line_elements[self.message_type] == '$GPGGA': # For GGA messages, do:
self.time.append(line_elements[self.GGA_fix_time]) # Store message time
self.altitude.append(line_elements[self.GGA_alt]) # Store altitude
self.satellites_tracked.append(line_elements[self.GGA_num_sat]) # Store tracked satellites
if(len(line_elements[self.GGA_lat])>=6):
self.latitude.append(line_elements[self.GGA_lat]) # Store latitude
else:
self.latitude.append('x')
if(len(line_elements[self.GGA_lon])>=6):
self.longtitude.append(line_elements[self.GGA_lon]) # Store longtitude
else:
self.longtitude.append('x')
self.GNSS_quality.append(line_elements[self.GGA_fix_quality]) # Store GNSS fix quality
elif line_elements[self.message_type] == '$GPGSA': # For GSA messages do:
self.PDOP.append(line_elements[self.GSA_PDOP]) # Store position delution
return
# Plot altitude vs. time
def plot_altitude(self):
print 'Generating altitude vs. time plot'
if len(self.altitude) == 0:
print 'No altitude information'
else:
fig1 = plt.figure()
fig1.canvas.set_window_title('Altitude vs. time')
plt.plot(self.time,self.altitude)
plt.xlabel('Time')
plt.ylabel('Altitude [m]')
plt.title('Altitude vs. time')
plt.show()
return
# Plot tracked satellites vs. time
def plot_tracked_satellites(self):
print 'Generating tracked satellites vs. time plot'
if len(self.satellites_tracked) == 0:
print 'No satellite tracking information'
else:
fig2 = plt.figure()
fig2.canvas.set_window_title('Tracked satellites vs. time')
plt.plot(self.time, self.satellites_tracked)
plt.ylim([5,15])
plt.xlabel('Time')
plt.ylabel(' # tracked satellites')
plt.title('Tracked satellites vs. time')
plt.show()
return
# Convert from DM.m.... to D.d Formula from: http://www.directionsmag.com/site/latlong-converter/
def degree_minutes_to_degree(self,degree_minutes):
splitted = degree_minutes.split('.') # split before and after dot
length = len(splitted[0]) # get length of the first part
degree = splitted[0][0:length-2] # Degrees are anything but the last two digits
minutes = splitted[0][length-2:length]+'.'+splitted[1] # combine the minutes
minutes_in_deg = float(minutes)/float(60) # .d = M.m/60
degree = float(degree) + float(minutes_in_deg) # D.d = D + .d
return degree
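# Worked example for the formula above (hypothetical NMEA value, not from a
# real log): "5530.1234" means 55 degrees 30.1234 minutes, so the decimal
# degrees are 55 + 30.1234/60 = 55.50206 (approximately).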
# Generate KML-file with drone track.
def generate_track_file(self,filename,name,description,size):
if len(self.latitude) == 0:
print 'No track points'
else:
track = kmlclass()
track.begin(filename,name,description,size)
track.trksegbegin('Segname','Segdesc','yellow','absolute')
for i in range (0,len(self.latitude)):
#for i in range (0,1):
if (self.latitude[i] != 'x' and self.longtitude[i] != 'x'):
track.trkpt(self.degree_minutes_to_degree(self.latitude[i]),self.degree_minutes_to_degree(self.longtitude[i]),float(self.altitude[i]))
track.trksegend()
track.end()
return
# Generate KML-file with drone track colored based on PDOP (GNSS accuracy)
def generate_GNSS_accuracy_file(self,filename,name,description,size):
if len(self.latitude) == 0:
print 'No track points'
else:
track = kmlclass() # Instantiate object
track.begin(filename,name,description,size) # Create the file
prev_color = '' # For decision of new segment
first = True
for i in range(0,len(self.latitude)): # For all pairs of GGA and GSA messages
if(self.latitude[i] != 'x' and self.longtitude[i] != 'x'):
# Decide color and description based on PDOP value
if float(self.PDOP[i]) < 2:
color = 'green'
seg_name = 'level1'
seg_desc = 'PDOP below 2'
elif float(self.PDOP[i]) < 3:
color = 'blue'
seg_name = 'level2'
seg_desc = 'PDOP below 3'
elif float(self.PDOP[i]) < 4:
color = 'yellow'
seg_name = 'level3'
seg_desc = 'PDOP below 4'
elif float(self.PDOP[i]) < 5:
color = 'cyan'
seg_name = 'level4'
seg_desc = 'PDOP below 5'
elif float(self.PDOP[i]) < 6:
color = 'grey'
seg_name = 'level5'
seg_desc = 'PDOP below 6'
else:
color = 'red'
seg_name = 'level6'
seg_desc = 'PDOP above 6'
if(color != prev_color): # We only need to create a new segment, if color changes
if(first != True):
track.trksegend() # End segment before starting a new one (if not the first)
else:
first = False # Bookkeeping
track.trksegbegin(seg_name,seg_desc,color,'absolute') # Start new segment (color)
track.trkpt(self.degree_minutes_to_degree(self.latitude[i]),self.degree_minutes_to_degree(self.longtitude[i]),float(self.altitude[i])) # Add point to segment
prev_color = color # Store color for comparison next time
track.trksegend() # End the last segment
track.end() # Close the file
return
# Main
# Flight data
data = NMEA_data_parser(sys.argv[1]) # Instantiate parser object
data.parse() # Parse the data file
data.plot_altitude() # Plot the altitude
data.plot_tracked_satellites() # Plot tracked satellites
data.generate_track_file('drone_track.kml','Drone track','This is the track of the drone',1)
# 24 hour static data
data24 = NMEA_data_parser(sys.argv[2]) # Instantiate parser object
data24.parse()
data24.generate_GNSS_accuracy_file('GNSS_static_accuracy.kml','Static GNSS accuracy','The colors mark GNSS accuracy',1) # Generate accuracy file
| bsd-3-clause |
lucastheis/isa | code/tools/contours.py | 1 | 1219 | """
Tool for creating contour plots from samples.
"""
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <[email protected]>'
__docformat__ = 'epytext'
__version__ = '1.1.1'
from numpy import histogram2d, cov, sqrt, sum, multiply, dot
from numpy.linalg import inv
try:
from matplotlib.pyplot import clf, contour, axis, draw
except:
pass
def contours(data, bins=20, levels=10, threshold=3., **kwargs):
"""
Estimate and visualize 2D histogram.
@type data: array_like
@param data: data stored in columns
@type bins: integer
@param bins: number of bins per dimension
@type levels: integer
@param levels: number of contour levels to draw
@type threshold: float
@param threshold: the smaller, the more outliers will be ignored
"""
# detect outliers
error = sqrt(sum(multiply(data, dot(inv(cov(data)), data)), 0))
# make sure at least 90% of the data will be kept
while sum(error < threshold) < 0.9 * data.shape[1]:
threshold += 1.
# exclude outliers
data = data[:, error < threshold]
# compute histogram
Z, X, Y = histogram2d(data[0, :], data[1, :], bins, normed=True)
X = (X[1:] + X[:-1]) / 2.
Y = (Y[1:] + Y[:-1]) / 2.
# contour plot of histogram
contour(X, Y, Z.T, levels, **kwargs)
draw()
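# Usage sketch (illustrative, not part of the original module): visualize
# samples from a 2D standard normal distribution. The variable names below are
# arbitrary examples.
#
# import numpy as np
# samples = np.random.randn(2, 10000)  # data stored in columns
# contours(samples, bins=30, levels=8)
# from matplotlib.pyplot import show
# show()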
| mit |
BoltzmannBrain/nupic | src/nupic/research/monitor_mixin/monitor_mixin_base.py | 13 | 7350 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
MonitorMixinBase class used in monitor mixin framework.
Using a monitor mixin with your algorithm
-----------------------------------------
1. Create a subclass of your algorithm class, with the first parent being the
corresponding Monitor class. For example,
class MonitoredTemporalMemory(TemporalMemoryMonitorMixin,
TemporalMemory): pass
2. Create an instance of the monitored class and use that.
instance = MonitoredTemporalMemory()
# Run data through instance
3. Now you can call the following methods to print monitored data from of your
instance:
- instance.mmPrettyPrintMetrics(instance.mmGetDefaultMetrics())
- instance.mmPrettyPrintTraces(instance.mmGetDefaultTraces())
Each specific monitor also has specific methods you can call to extract data
out of it.
Adding data to a monitor mixin
-----------------------------------------
1. Create a variable for the data you want to capture in your specific monitor's
`mmClearHistory` method. For example,
self._mmTraces["predictedCells"] = IndicesTrace(self, "predicted cells")
Make sure you use the correct type of trace for your data.
2. Add data to this trace in your algorithm's `compute` method (or anywhere
else).
self._mmTraces["predictedCells"].data.append(set(self.getPredictiveCells()))
3. You can optionally add this trace as a default trace in `mmGetDefaultTraces`,
or define a function to return that trace:
def mmGetTracePredictiveCells(self):
return self._mmTraces["predictedCells"]
Any trace can be converted to a metric using the utility functions provided in
the framework (see `metric.py`).
Extending the functionality of the monitor mixin framework
-----------------------------------------
If you want to add new types of traces and metrics, add them to `trace.py`
and `metric.py`. You can also create new monitors by simply defining new classes
that inherit from MonitorMixinBase.
"""
import abc
import numpy
from prettytable import PrettyTable
from nupic.research.monitor_mixin.plot import Plot
class MonitorMixinBase(object):
"""
Base class for MonitorMixin. Each subclass will be a mixin for a particular
algorithm.
All arguments, variables, and methods in monitor mixin classes should be
prefixed with "mm" (to avoid collision with the classes they mix in to).
"""
__metaclass__ = abc.ABCMeta
def __init__(self, *args, **kwargs):
"""
Note: If you set the kwarg "mmName", then pretty-printing of traces and
metrics will include the name you specify as a tag before every title.
"""
self.mmName = kwargs.get("mmName")
if "mmName" in kwargs:
del kwargs["mmName"]
super(MonitorMixinBase, self).__init__(*args, **kwargs)
# Mapping from key (string) => trace (Trace)
self._mmTraces = None
self._mmData = None
self.mmClearHistory()
def mmClearHistory(self):
"""
Clears the stored history.
"""
self._mmTraces = {}
self._mmData = {}
@staticmethod
def mmPrettyPrintTraces(traces, breakOnResets=None):
"""
Returns pretty-printed table of traces.
@param traces (list) Traces to print in table
@param breakOnResets (BoolsTrace) Trace of resets to break table on
@return (string) Pretty-printed table of traces.
"""
assert len(traces) > 0, "No traces found"
table = PrettyTable(["#"] + [trace.prettyPrintTitle() for trace in traces])
for i in xrange(len(traces[0].data)):
if breakOnResets and breakOnResets.data[i]:
table.add_row(["<reset>"] * (len(traces) + 1))
table.add_row([i] +
[trace.prettyPrintDatum(trace.data[i]) for trace in traces])
return table.get_string().encode("utf-8")
@staticmethod
def mmPrettyPrintMetrics(metrics, sigFigs=5):
"""
Returns pretty-printed table of metrics.
@param metrics (list) Traces to print in table
@param sigFigs (int) Number of significant figures to print
@return (string) Pretty-printed table of metrics.
"""
assert len(metrics) > 0, "No metrics found"
table = PrettyTable(["Metric", "mean", "standard deviation",
"min", "max", "sum", ])
for metric in metrics:
table.add_row([metric.prettyPrintTitle()] + metric.getStats())
return table.get_string().encode("utf-8")
def mmGetDefaultTraces(self, verbosity=1):
"""
Returns list of default traces. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default traces
"""
return []
def mmGetDefaultMetrics(self, verbosity=1):
"""
Returns list of default metrics. (To be overridden.)
@param verbosity (int) Verbosity level
@return (list) Default metrics
"""
return []
def mmGetCellTracePlot(self, cellTrace, cellCount, activityType, title="",
showReset=False, resetShading=0.25):
"""
Returns plot of the cell activity. Note that if many timesteps of
activities are input, matplotlib's image interpolation may omit activities
(columns in the image).
@param cellTrace (list) a temporally ordered list of sets of cell
activities
@param cellCount (int) number of cells in the space being rendered
@param activityType (string) type of cell activity being displayed
@param title (string) an optional title for the figure
@param showReset (bool) if true, the first set of cell activities
after a reset will have a grayscale background
@param resetShading (float) applicable if showReset is true, specifies the
intensity of the reset background with 0.0
being white and 1.0 being black
@return (Plot) plot
"""
plot = Plot(self, title)
resetTrace = self.mmGetTraceResets().data
data = numpy.zeros((cellCount, 1))
for i in xrange(len(cellTrace)):
# Set up a "background" vector that is shaded or blank
if showReset and resetTrace[i]:
activity = numpy.ones((cellCount, 1)) * resetShading
else:
activity = numpy.zeros((cellCount, 1))
activeIndices = cellTrace[i]
activity[list(activeIndices)] = 1
data = numpy.concatenate((data, activity), 1)
plot.add2DArray(data, xlabel="Time", ylabel=activityType, name=title)
return plot
| agpl-3.0 |
dsquareindia/scikit-learn | sklearn/_build_utils/__init__.py | 80 | 2644 | """
Utilities useful during the build.
"""
# author: Andy Mueller, Gael Varoquaux
# license: BSD
from __future__ import division, print_function, absolute_import
import os
from distutils.version import LooseVersion
from numpy.distutils.system_info import get_info
DEFAULT_ROOT = 'sklearn'
CYTHON_MIN_VERSION = '0.23'
def get_blas_info():
def atlas_not_found(blas_info_):
def_macros = blas_info.get('define_macros', [])
for x in def_macros:
if x[0] == "NO_ATLAS_INFO":
# if x[1] != 1 we should have lapack
# how do we do that now?
return True
if x[0] == "ATLAS_INFO":
if "None" in x[1]:
# this one turned up on FreeBSD
return True
return False
blas_info = get_info('blas_opt', 0)
if (not blas_info) or atlas_not_found(blas_info):
cblas_libs = ['cblas']
blas_info.pop('libraries', None)
else:
cblas_libs = blas_info.pop('libraries', [])
return cblas_libs, blas_info
def build_from_c_and_cpp_files(extensions):
"""Modify the extensions to build from the .c and .cpp files.
This is useful for releases: this way Cython is not required to
run python setup.py install.
"""
for extension in extensions:
sources = []
for sfile in extension.sources:
path, ext = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
if extension.language == 'c++':
ext = '.cpp'
else:
ext = '.c'
sfile = path + ext
sources.append(sfile)
extension.sources = sources
def maybe_cythonize_extensions(top_path, config):
"""Tweaks for building extensions between release and development mode."""
is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO'))
if is_release:
build_from_c_and_cpp_files(config.ext_modules)
else:
message = ('Please install cython with a version >= {0} in order '
'to build a scikit-learn development version.').format(
CYTHON_MIN_VERSION)
try:
import Cython
if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION:
message += ' Your version of Cython was {0}.'.format(
Cython.__version__)
raise ValueError(message)
from Cython.Build import cythonize
except ImportError as exc:
exc.args += (message,)
raise
config.ext_modules = cythonize(config.ext_modules)
| bsd-3-clause |
pythonvietnam/scikit-learn | sklearn/utils/testing.py | 71 | 26178 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
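# Illustrative example (not part of the original module): the call below would
# pass, because warnings.warn("deprecated", UserWarning) emits a UserWarning.
#
# assert_warns(UserWarning, warnings.warn, "deprecated", UserWarning)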
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
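# Illustrative example (not part of the original module): checks both the
# warning category and a substring of its message.
#
# assert_warns_message(DeprecationWarning, "will be removed",
#                      warnings.warn, "foo will be removed in 0.19",
#                      DeprecationWarning)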
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
Name of the estimator
func : callable
Callable object to raise error
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
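# Illustrative example (not part of the original module): int("x") raises a
# ValueError whose message contains "invalid literal", so this would pass.
#
# assert_raise_message(ValueError, "invalid literal", int, "x")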
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data'; take that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
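# Usage sketch (illustrative; the dataset name and column arrays below are
# made up, not a real mldata data set):
#
# iris_fake = {'label': np.arange(150) % 3, 'data': np.zeros((150, 4))}
# install_mldata_mock({'iris': iris_fake})
# # ... code under test may now call sklearn.datasets.fetch_mldata('iris') ...
# uninstall_mldata_mock()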
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and
cannot be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
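# Illustrative example (not part of the original module): list only classifier
# estimators and print their names.
#
# for name, Est in all_estimators(type_filter='classifier'):
#     print(name)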
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
" in 0.19: use the safer and more generic"
" if_safe_multiprocessing_with_blas instead",
DeprecationWarning)
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing
Under Python < 3.4 and POSIX (e.g. Linux or OSX), using multiprocessing in
conjunction with some implementation of BLAS (or other libraries that
manage an internal posix thread pool) can cause a crash or a freeze of the
Python process.
Under Python 3.4 and later, joblib uses the forkserver mode of
multiprocessing which does not trigger this problem.
In practice all known packaged distributions (from Linux distros or
Anaconda) of BLAS under Linux seem to be safe. So this problem seems to
only impact OSX users.
This wrapper makes it possible to skip tests that can possibly cause
this crash under OSX.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin' and sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copied from joblib.pool (for independence)."""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
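# Usage sketch (illustrative): expose test data as a read-only memory map, e.g.
# to check that an estimator does not write to its input.
#
# with TempMemmap(np.ones((10, 2))) as data_read_only:
#     ...  # data_read_only is a joblib-backed memmap view of the array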
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
madjelan/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
sanketloke/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
saiwing-yeung/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 311 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes the path solution only at each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. It is also able to
compute the full path without setting any meta parameter. By contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples for many of them to be selected. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
numenta/nupic.research | nupic/research/support/elastic_logger.py | 3 | 9319 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
#
import os
import re
import subprocess
from datetime import datetime
from elasticsearch import Elasticsearch, helpers
from elasticsearch.client.xpack import SqlClient
from elasticsearch.helpers import BulkIndexError
from pandas import DataFrame
from pandas.io.json import json_normalize
from ray.tune.logger import Logger
def create_elastic_client(**kwargs):
"""
Create and configure :class:`elasticsearch.Elasticsearch` client
The following environment variables are used to configure the client:
- **ELASTIC_CLOUD_ID**: The Cloud ID from ElasticCloud. Other host
connection params will be ignored. ("cloud_id")
- **ELASTIC_HOSTS**: List of nodes we should connect to. ("hosts")
- **ELASTIC_AUTH**: http auth information ("http_auth")
:param kwargs: Used to override the environment variables or pass extra
parameters to the :class:`elasticsearch.Elasticsearch`.
:type kwargs: dict
:return: Configured :class:`elasticsearch.Elasticsearch` client
:rtype: :class:`Elasticsearch`
"""
hosts = os.environ.get("ELASTIC_HOSTS")
hosts = None if hosts is None else hosts.split(",")
elasticsearch_args = {
"cloud_id": os.environ.get("ELASTIC_CLOUD_ID"),
"hosts": hosts,
"http_auth": os.environ.get("ELASTIC_AUTH")
}
# Update elasticsearch client arguments from configuration if present
elasticsearch_args.update(kwargs)
return Elasticsearch(**elasticsearch_args)
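# Usage sketch (illustrative; assumes the ELASTIC_* environment variables
# described above are set, or that connection parameters are passed as kwargs):
#
# client = create_elastic_client(timeout=30)
# assert client.ping()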
class ElasticsearchLogger(Logger):
"""
Elasticsearch Logging interface for `ray.tune`.
This logger will upload all the results to an elasticsearch index.
In addition to the regular ray tune log entry, this logger will add the
the last git commit information and the current `logdir` to the results.
The following environment variables are used to configure the
:class:`elasticsearch.Elasticsearch` client:
- **ELASTIC_CLOUD_ID**: The Cloud ID from ElasticCloud. Other host
connection params will be ignored
- **ELASTIC_HOSTS**: list of elasticsearch nodes to connect to (comma separated)
- **ELASTIC_AUTH**: http auth information ('user:password')
You may override the environment variables or pass extra parameters to the
:class:`elasticsearch.Elasticsearch` client for the specific experiment
using the "elasticsearch_client" configuration key.
The elasticsearch index name is based on the current results root path. You
may override this behavior and use a specific index name for your experiment
using the configuration key `elasticsearch_index`.
"""
def _init(self):
elasticsearch_args = self.config.get("elasticsearch_client", {})
self.client = create_elastic_client(**elasticsearch_args)
# Save git information
self.git_remote = subprocess.check_output(
["git", "ls-remote", "--get-url"]).decode("ascii").strip()
self.git_branch = subprocess.check_output(
["git", "rev-parse", "--abbrev-ref", "HEAD"]).decode("ascii").strip()
self.git_sha = subprocess.check_output(
["git", "rev-parse", "HEAD"]).decode("ascii").strip()
self.git_user = subprocess.check_output(
["git", "log", "-n", "1", "--pretty=format:%an"]).decode("ascii").strip()
# Check for elasticsearch index name in configuration
index_name = self.config.get("elasticsearch_index")
if index_name is None:
# Create default index name based on log path and git repo name
git_root = subprocess.check_output(
["git", "rev-parse", "--show-toplevel"]).decode("ascii").strip()
# strip a trailing ".git" suffix from the repository name
repo_name = re.sub(r"\.git$", "", os.path.basename(self.git_remote))
path_name = os.path.relpath(self.config["path"], git_root)
index_name = os.path.join(repo_name, path_name)
# slugify index name
index_name = re.sub(r"[\W_]+", "-", index_name)
self.index_name = index_name
self.logdir = os.path.basename(self.logdir)
self.experiment_name = self.config["name"]
self.buffer = []
def on_result(self, result):
"""Given a result, appends it to the existing log."""
log_entry = {
"git": {
"remote": self.git_remote,
"branch": self.git_branch,
"sha": self.git_sha,
"user": self.git_user
},
"logdir": self.logdir
}
# Convert timestamp to ISO-8601
timestamp = result["timestamp"]
result["timestamp"] = datetime.utcfromtimestamp(timestamp).isoformat()
log_entry.update(result)
self.buffer.append(log_entry)
def close(self):
self.flush()
def flush(self):
if len(self.buffer) > 0:
results = helpers.parallel_bulk(client=self.client,
actions=self.buffer,
index=self.index_name,
doc_type=self.experiment_name)
errors = [status for success, status in results if not success]
if errors:
raise BulkIndexError("{} document(s) failed to index.".
format(len(errors)), errors)
self.buffer.clear()
def elastic_dsl(client, dsl, index, **kwargs):
"""
Sends DSL query to elasticsearch and returns the results as a
:class:`pandas.DataFrame`.
:param client: Configured elasticsearch client. See :func:`create_elastic_client`
:type client: :class:`elasticsearch.Elasticsearch`
:param dsl: Elasticsearch DSL query statement
See https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html # noqa: E501
:type dsl: str
:param index: Index pattern. Usually the same as 'from' part of the SQL
See https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-index.html # noqa: E501
:type index: str
:param kwargs: Any additional keyword arguments will be passed to the initial
:meth:`elasticsearch.Elasticsearch.search` call
:type kwargs: dict
:return: results as a :class:`pandas.DataFrame`.
:rtype: :class:`pandas.DataFrame`
"""
response = helpers.scan(client=client, query=dsl, index=index, **kwargs)
data = []
for row in response:
# Normalize nested dicts in '_source' such as 'config' or 'git'
source = json_normalize(row["_source"]) if "_source" in row else {}
# Squeeze scalar fields returned as arrays in the response by the search API
fields = row.get("fields", {})
fields = {k: v[0] if len(v) == 1 else v for k, v in fields.items()}
data.append({
"_index": row["_index"],
"_type": row["_type"],
**fields,
**source,
})
return DataFrame(data)
def elastic_sql(client, sql, index, **kwargs):
"""
Sends SQL query to elasticsearch and returns the results as a
:class:`pandas.DataFrame`.
:param client: Configured elasticsearch client. See :func:`create_elastic_client`
:type client: :class:`elasticsearch.Elasticsearch`
:param sql: Elasticsearch SQL query statement
See https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-sql.html # noqa: E501
:type sql: str
:param index: Index pattern. Usually the same as 'from' part of the SQL
See https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-index.html
:type index: str
:param kwargs: Any additional keyword arguments will be passed to the initial
:meth:`elasticsearch.Elasticsearch.search` call
:type kwargs: dict
:return: results as a :class:`pandas.DataFrame`.
:rtype: :class:`pandas.DataFrame`
"""
sql_client = SqlClient(client)
# FIXME: SQL API does not support arrays. See https://github.com/elastic/elasticsearch/issues/33204 # noqa: E501
# For now we translate the SQL into elasticsearch DSL query and use the
# 'search' API to fetch the results
dsl = sql_client.translate({"query": sql})
# Ignore score
if "query" in dsl:
query = dsl["query"]
dsl["query"] = {"constant_score": {"filter": query}}
return elastic_dsl(client, dsl, index, **kwargs)
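# Usage sketch (illustrative; the index pattern below is a placeholder for
# whatever experiment index the logger above created):
#
# client = create_elastic_client()
# df = elastic_sql(client, 'SELECT * FROM "my-experiment-*"',
#                  index="my-experiment-*")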
| agpl-3.0 |
shantanoo/dejavu | dejavu/fingerprint.py | 15 | 5828 | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import (generate_binary_structure,
iterate_structure, binary_erosion)
import hashlib
from operator import itemgetter
IDX_FREQ_I = 0
IDX_TIME_J = 1
######################################################################
# Sampling rate, related to the Nyquist conditions, which affects
# the range of frequencies we can detect.
DEFAULT_FS = 44100
######################################################################
# Size of the FFT window, affects frequency granularity
DEFAULT_WINDOW_SIZE = 4096
######################################################################
# Ratio by which each sequential window overlaps the last and the
# next window. Higher overlap will allow a higher granularity of offset
# matching, but potentially more fingerprints.
DEFAULT_OVERLAP_RATIO = 0.5
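# For these defaults the spectrogram has a frequency resolution of
# DEFAULT_FS / DEFAULT_WINDOW_SIZE = 44100 / 4096, roughly 10.8 Hz per bin,
# and consecutive frames advance by DEFAULT_WINDOW_SIZE * (1 - DEFAULT_OVERLAP_RATIO)
# = 2048 samples, roughly 46 ms (illustrative arithmetic, not used by the code).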
######################################################################
# Degree to which a fingerprint can be paired with its neighbors --
# higher will cause more fingerprints, but potentially better accuracy.
DEFAULT_FAN_VALUE = 15
######################################################################
# Minimum amplitude in spectrogram in order to be considered a peak.
# This can be raised to reduce number of fingerprints, but can negatively
# affect accuracy.
DEFAULT_AMP_MIN = 10
######################################################################
# Number of cells around an amplitude peak in the spectrogram in order
# for Dejavu to consider it a spectral peak. Higher values mean less
# fingerprints and faster matching, but can potentially affect accuracy.
PEAK_NEIGHBORHOOD_SIZE = 20
######################################################################
# Thresholds on how close or far fingerprints can be in time in order
# to be paired as a fingerprint. If your max is too low, higher values of
# DEFAULT_FAN_VALUE may not perform as expected.
MIN_HASH_TIME_DELTA = 0
MAX_HASH_TIME_DELTA = 200
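# With the defaults above (a 2048-sample hop, roughly 46 ms per frame), 200
# frames correspond to roughly 9 seconds of audio (an illustrative estimate).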
######################################################################
# If True, will sort peaks temporally for fingerprinting;
# not sorting will cut down number of fingerprints, but potentially
# affect performance.
PEAK_SORT = True
######################################################################
# Number of bits to throw away from the front of the SHA1 hash in the
# fingerprint calculation. The more you throw away, the less storage, but
# potentially higher collisions and misclassifications when identifying songs.
FINGERPRINT_REDUCTION = 20
def fingerprint(channel_samples, Fs=DEFAULT_FS,
wsize=DEFAULT_WINDOW_SIZE,
wratio=DEFAULT_OVERLAP_RATIO,
fan_value=DEFAULT_FAN_VALUE,
amp_min=DEFAULT_AMP_MIN):
"""
FFT the channel, log transform output, find local maxima, then return
locally sensitive hashes.
"""
# FFT the signal and extract frequency components
arr2D = mlab.specgram(
channel_samples,
NFFT=wsize,
Fs=Fs,
window=mlab.window_hanning,
noverlap=int(wsize * wratio))[0]
# apply log transform since specgram() returns linear array
arr2D = 10 * np.log10(arr2D)
arr2D[arr2D == -np.inf] = 0 # replace infs with zeros
# find local maxima
local_maxima = get_2D_peaks(arr2D, plot=False, amp_min=amp_min)
# return hashes
return generate_hashes(local_maxima, fan_value=fan_value)
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
struct = generate_binary_structure(2, 1)
neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)
    # find local maxima using our filter shape
local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
background = (arr2D == 0)
eroded_background = binary_erosion(background, structure=neighborhood,
border_value=1)
# Boolean mask of arr2D with True at peaks
    detected_peaks = local_max ^ eroded_background  # XOR drops the flat zero background from the maxima
# extract peaks
amps = arr2D[detected_peaks]
j, i = np.where(detected_peaks)
# filter peaks
amps = amps.flatten()
peaks = zip(i, j, amps)
peaks_filtered = [x for x in peaks if x[2] > amp_min] # freq, time, amp
# get indices for frequency and time
frequency_idx = [x[1] for x in peaks_filtered]
time_idx = [x[0] for x in peaks_filtered]
if plot:
# scatter of the peaks
fig, ax = plt.subplots()
ax.imshow(arr2D)
ax.scatter(time_idx, frequency_idx)
ax.set_xlabel('Time')
ax.set_ylabel('Frequency')
ax.set_title("Spectrogram")
plt.gca().invert_yaxis()
plt.show()
return zip(frequency_idx, time_idx)
def generate_hashes(peaks, fan_value=DEFAULT_FAN_VALUE):
"""
Hash list structure:
sha1_hash[0:20] time_offset
[(e05b341a9b77a51fd26, 32), ... ]
"""
if PEAK_SORT:
peaks.sort(key=itemgetter(1))
for i in range(len(peaks)):
for j in range(1, fan_value):
if (i + j) < len(peaks):
freq1 = peaks[i][IDX_FREQ_I]
freq2 = peaks[i + j][IDX_FREQ_I]
t1 = peaks[i][IDX_TIME_J]
t2 = peaks[i + j][IDX_TIME_J]
t_delta = t2 - t1
if t_delta >= MIN_HASH_TIME_DELTA and t_delta <= MAX_HASH_TIME_DELTA:
h = hashlib.sha1(
"%s|%s|%s" % (str(freq1), str(freq2), str(t_delta)))
yield (h.hexdigest()[0:FINGERPRINT_REDUCTION], t1)
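# Minimal usage sketch under this module's own assumptions (Python 2 and the
# library versions dejavu targeted): fingerprint one second of a synthetic
# 440 Hz tone. The tone and its amplitude are illustrative only; real callers
# pass raw PCM samples from an audio channel.
if __name__ == '__main__':
    duration_s = 1.0
    t = np.arange(0, duration_s, 1.0 / DEFAULT_FS)
    samples = (0.5 * np.sin(2 * np.pi * 440.0 * t) * 2 ** 15).astype(np.int16)
    hashes = list(fingerprint(samples))
    print("generated %d fingerprint hashes" % len(hashes))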
| mit |
adam-rabinowitz/ngs_analysis | ranges/interval_functions.py | 2 | 2670 | import pandas as pd
def merge_overlaps(
intervals, overlap = 1, return_sorted = True
):
    # Work on a copy holding only the interval coordinates
    intervals = intervals[['start', 'end']]
    intervals = intervals.sort_values(['start', 'end'])
    # Overlap between each interval's start and the furthest end seen so far
    intervals['overlap'] = intervals['end'].cummax().shift(1) - intervals['start']
    # Intervals overlapping the preceding ones by less than 'overlap' start a new group
    intervals['separate'] = intervals['overlap'] < overlap
    intervals['group'] = intervals['separate'].cumsum()
    intervals = intervals.groupby('group')
# Create and store output dataframe
output = pd.DataFrame()
output['start'] = intervals['start'].min()
output['end'] = intervals['end'].max()
if return_sorted:
output = output.sort_values(['start', 'end'])
output.index = range(output.shape[0])
return(output)
def merge_overlaps_strand(
intervals, overlap=1, return_sorted=True, ignore_strand=False
):
# Merge intervals while ignoring strand
if ignore_strand:
intervals = intervals[['start', 'end']]
intervals = merge_overlaps(
intervals, overlap, False
)
intervals['strand'] = '*'
# Merge intervals while including strand
else:
intervals = intervals[['start', 'end', 'strand']]
intervals = intervals.groupby('strand')
intervals = intervals.apply(
lambda x: merge_overlaps(x, overlap, False)
)
intervals = intervals.reset_index('strand')
intervals = intervals[['start', 'end', 'strand']]
# Process and return output intervals
if return_sorted:
intervals = intervals.sort_values(['start', 'end'])
intervals.index = range(intervals.shape[0])
return(intervals)
def merge_overlaps_chrom(
intervals, overlap=1, return_sorted=True, ignore_strand=False
):
# Generate intervals data frame and split on strand
if ignore_strand:
intervals = intervals[['chr', 'start', 'end']]
intervals['strand'] = '*'
else:
intervals = intervals[['chr', 'start', 'end', 'strand']]
# Group intervals by strand and merge overlaps
intervals = intervals.groupby('chr')
intervals = intervals.apply(
lambda x: merge_overlaps_strand(x, overlap, False)
)
intervals = intervals.reset_index('chr')
# Process and return intervals
print(intervals)
if return_sorted:
intervals = intervals.sort_values(['chr', 'start', 'end', 'strand'])
intervals.index = range(intervals.shape[0])
return(intervals)
x = pd.DataFrame()
x['chr'] = ['chr1', 'chr1', 'chr2', 'chr2', 'chr3']
x['start'] = [0, 5, 0, 5, 0]
x['end'] = [10, 15, 10, 15, 10]
x['strand'] = ['+', '-', '-', '-', '*']
print(x)
y = merge_overlaps_chrom(x, ignore_strand=True)
print(y)
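# A further illustrative call: merging plain start/end intervals directly with
# merge_overlaps. The frame below is made up for the demo; [0, 10] and [5, 15]
# collapse into a single [0, 15] interval while [20, 30] stays separate.
w = pd.DataFrame({'start': [0, 5, 20], 'end': [10, 15, 30]})
print(merge_overlaps(w))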
| gpl-2.0 |
vincentchoqueuse/parametrix | examples/ex_line_spectra_model_order_MC.py | 1 | 1041 | from parametrix.line_spectra.signal_models import M_Line_Spectra
from parametrix.line_spectra.classifiers import C_ModelOrder_Line_Spectra_IC,C_ModelOrder_Line_Spectra_NP_IC
from parametrix.monte_carlo.classifiers import MC_Simulations_confusion_matrix
import numpy as np
import matplotlib.pyplot as plt
Fe=1000
N=100
nb_trials=100
""" Example: Model order selection for the line spectra model """
print("--- Signal Model ---")
# Line spectra model
theta_vect=2*np.pi*np.array([56,128])/Fe
x=np.array([1+2j,1.2-0.3j])
model=M_Line_Spectra(theta_vect,x,N,sigma2=0.05)
model.class_label="L_2"
## Model Order Selection
L_vect=np.arange(1,11)
classifier_AIC=C_ModelOrder_Line_Spectra_IC(L_vect,M=20,method="AIC")
classifier_BIC=C_ModelOrder_Line_Spectra_IC(L_vect,M=20,method="BIC")
classifier_BIC2=C_ModelOrder_Line_Spectra_NP_IC(L_vect,M=20,method="BIC")
## Monte Carlo Simulation
mc=MC_Simulations_confusion_matrix([classifier_AIC,classifier_BIC,classifier_BIC2])
mc.trials(model, nb_trials=nb_trials)
mc.show_confusion_matrix()
#
plt.show()
| bsd-3-clause |
isomerase/mozziesniff | roboskeeter/plotting/animate_trajectory.py | 2 | 2490 | from matplotlib import animation
from roboskeeter.plotting.plot_environment import plot_windtunnel as pwt
# Params
sim_or_exp = 'simulation' # 'experiment', 'simulation'
experiment = eval(sim_or_exp)
highlight_inside_plume = False
show_plume = False
trajectory_i = None
if trajectory_i is None:
trajectory_i = experiment.trajectories.get_trajectory_numbers().min()
# get df
df = experiment.trajectories.get_trajectory_slice(trajectory_i)
p = df[['position_x', 'position_y', 'position_z']].values
x_t = p.reshape((1, len(p), 3)) # make into correct shape for Jake vdp's code
fig, ax = pwt.plot_windtunnel(experiment.windtunnel)
ax.axis('off')
if show_plume:
pwt.draw_plume(experiment, ax=ax)
# # choose a different color for each trajectory
# colors = plt.cm.jet(np.linspace(0, 1, N_trajectories))
# set up lines and points
lines = sum([ax.plot([], [], [], '-', c='gray')
], [])
pts = sum([ax.plot([], [], [], '*', c='black')
], [])
# # prepare the axes limits
# ax.set_xlim((0, 1))
# ax.set_ylim((-.127, .127))
# ax.set_zlim((0, .254))
# set point-of-view: specified by (altitude degrees, azimuth degrees)
# ax.view_init(90, 0)
# initialization function: plot the background of each frame
def init():
for line, pt in zip(lines, pts):
line.set_data([], [])
line.set_3d_properties([])
pt.set_data([], [])
pt.set_3d_properties([])
return lines + pts
# animation function. This will be called sequentially with the frame number
def animate(i):
# we'll step two time-steps per frame. This leads to nice results.
i = (2 * i) % x_t.shape[1]
print i
for line, pt, xi in zip(lines, pts, x_t):
x, y, z = xi[:i].T
print xi.shape
line.set_data(x, y)
line.set_3d_properties(z)
pt.set_data(x[-1:], y[-1:])
pt.set_3d_properties(z[-1:])
# ax.view_init(30, 0.3 * i)
ax.view_init(90, 0 * i)
fig.canvas.draw()
return lines + pts
# instantiate the animator.
anim = animation.FuncAnimation(fig, animate, init_func=init,
interval=1, blit=False, repeat_delay=8000,
frames=len(p)) # original: frames=500, interval=30, blit=True)
# added writer b/c original func didn't work
Writer = animation.writers['mencoder']
writer = Writer(fps=100, metadata=dict(artist='Richard'), bitrate=1000)
anim.save('{}-{}.mp4'.format(sim_or_exp, trajectory_i), writer=writer)
# plt.show()
| mit |
tetherless-world/ecoop | pyecoop/docs/sphinxext/inheritance_diagram.py | 98 | 13648 | """
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram: D E
Produces a graph like the following:
        A
       / \
      B   C
     / \ /
    E   D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* $ # optionally arguments
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
def run_dot(self, args, name, parts=0, urls={},
graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=True)
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
except ValueError:
raise DotException("'dot' called with invalid arguments")
except:
raise DotException("Unexpected error calling 'dot'")
self.generate_dot(dot.stdin, name, parts, urls, graph_options,
node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode)
return result
class inheritance_diagram(Body, Element):
"""
A docutils node to use as a placeholder for the inheritance
diagram.
"""
pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
content_offset, block_text, state,
state_machine):
"""
Run when the inheritance_diagram directive is first encountered.
"""
node = inheritance_diagram()
class_names = arguments
# Create a graph starting with the list of classes
graph = InheritanceGraph(class_names)
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = xfileref_role(
'class', ':class:`%s`' % name, name, 0, state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# Store the original content for use as a hash
node['parts'] = options.get('parts', 0)
node['content'] = " ".join(class_names)
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
path = '_images'
dest_path = os.path.join(setup.app.builder.outdir, path)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
png_path = os.path.join(dest_path, name + ".png")
path = setup.app.builder.imgpath
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
# These arguments to dot will save a PNG file to disk and write
# an HTML image map to stdout.
image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
name, parts, urls)
return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
(path, name, name, image_map))
def latex_output_graph(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
name, parts, graph_options={'size': '"6.0,6.0"'})
return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
"""
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
"""
def visitor(self, node):
try:
content = inner_func(self, node)
except DotException, e:
# Insert the exception as a warning in the document
warning = self.document.reporter.warning(str(e), line=node.line)
warning.parent = node
node.children = [warning]
else:
source = self.document.attributes['source']
self.body.append(content)
node.children = []
return visitor
def do_nothing(self, node):
pass
def setup(app):
setup.app = app
setup.confdir = app.confdir
app.add_node(
inheritance_diagram,
latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
html=(visit_inheritance_diagram(html_output_graph), do_nothing))
app.add_directive(
'inheritance-diagram', inheritance_diagram_directive,
False, (1, 100, 0), parts = directives.nonnegative_int)
| apache-2.0 |