repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---|
jayflo/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 221 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)  # transform (do not refit) the test set
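# A minimal sanity check (not part of the original example), assuming the
# default 25th-75th percentile scaling of RobustScaler: its output should
# match centering each feature on its median and dividing by its IQR. The
# names median, iqr and Xtr_manual are introduced here only for this check.
median = np.median(X_train, axis=0)
q25, q75 = np.percentile(X_train, [25, 75], axis=0)
iqr = q75 - q25
Xtr_manual = (X_train - median) / iqr
np.testing.assert_allclose(Xtr_manual, Xtr_r)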
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
bnaul/scikit-learn | sklearn/utils/setup.py | 20 | 2759 | import os
from os.path import join
from sklearn._build_utils import gen_from_templates
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
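# On POSIX systems, link the compiled extensions below against libm ('m')
# for the C math routines they use.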
config.add_extension('sparsefuncs_fast',
sources=['sparsefuncs_fast.pyx'],
libraries=libraries)
config.add_extension('_cython_blas',
sources=['_cython_blas.pyx'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('murmurhash',
sources=['murmurhash.pyx', join(
'src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.pyx'],
include_dirs=[numpy.get_include()])
config.add_extension('_fast_dict',
sources=['_fast_dict.pyx'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('_openmp_helpers',
sources=['_openmp_helpers.pyx'],
libraries=libraries)
# generate _seq_dataset from template
templates = ['sklearn/utils/_seq_dataset.pyx.tp',
'sklearn/utils/_seq_dataset.pxd.tp']
gen_from_templates(templates, top_path)
config.add_extension('_seq_dataset',
sources=['_seq_dataset.pyx'],
include_dirs=[numpy.get_include()])
config.add_extension('_weight_vector',
sources=['_weight_vector.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_random",
sources=["_random.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
a-doumoulakis/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 3 | 59518 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import copy
import os
import tempfile
import numpy as np
import six
from google.protobuf import message
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.meta_graph_transform import meta_graph_transform
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary as core_summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
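# Illustrative sketch of the conversion described above (my_model_fn,
# my_input_fn, x_train and y_train are hypothetical placeholders):
#
#   est = Estimator(model_fn=my_model_fn)
#   SKCompat(est).fit(x_train, y_train, batch_size=128, steps=100)  # x/y interface
#   est.fit(input_fn=my_input_fn, steps=100)  # input_fn-only interface kept by Estimator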
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existence of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if tensor_util.is_tensor(x) or y is not None and tensor_util.is_tensor(y):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - A `Tensor`, or a dictionary of string feature name to `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
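# Illustrative sketch (hypothetical function, not part of this module): given
#   def my_model_fn(features, labels, mode, params): ...
#   bound_fn = functools.partial(my_model_fn, params={'learning_rate': 0.1})
# _model_fn_args(bound_fn) returns ('features', 'labels', 'mode'): positionally
# bound arguments are sliced off and keyword-bound names are excluded.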
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableV2', 'MutableHashTableOfTensors',
'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
'MutableDenseHashTableV2'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
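# Illustrative sketch: for a hypothetical RunConfig with task_type='worker',
# task_id=3 and num_ps_replicas=2, the setter places ops of the types listed
# in ps_ops on the parameter servers and everything else on
# '/job:worker/task:3'; with num_ps_replicas=0 it returns None.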
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors). A
`(friendly_name, pred_name)` tuple key may also map to a metric function, in
which case `predictions[pred_name]` and `labels` are passed to that function
(assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
A dict mapping the friendly names given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Typically this means a dict
of predictions was given but no pred_name was specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
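# Illustrative sketch of a `metrics` argument mixing the forms handled above
# (my_accuracy, my_mse and my_recall are hypothetical metric functions):
#
#   metrics = {
#       'accuracy': metric_spec.MetricSpec(metric_fn=my_accuracy),  # preferred
#       'mse': my_mse,  # bare function; single-tensor predictions/labels
#       ('recall', 'classes'): my_recall,  # tuple key: predictions['classes']
#   }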
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
results = []
for k, v in sorted(dictionary.items()):
if isinstance(v, float) or isinstance(v, np.float32) or isinstance(
v, int) or isinstance(v, np.int64) or isinstance(v, np.int32):
results.append('%s = %s' % (k, v))
else:
results.append('Type of %s = %s' % (k, type(v)))
return ', '.join(results)
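# Illustrative example: _dict_to_str({'loss': 0.25, 'global_step': 100})
# returns 'global_step = 100, loss = 0.25' (keys sorted; non-numeric values
# are reported by type instead).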
def _write_dict_to_summary(output_dir, dictionary, current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = core_summary.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))
elif isinstance(dictionary[key], six.string_types):
try:
summ = summary_pb2.Summary.FromString(dictionary[key])
for i, _ in enumerate(summ.value):
summ.value[i].tag = key
summary_proto.value.extend(summ.value)
except message.DecodeError:
logging.warn('Skipping summary for %s, cannot parse string to Summary.',
key)
continue
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, '
'np.int32 or int or a serialized string of Summary.', key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
GraphRewriteSpec = collections.namedtuple('GraphRewriteSpec',
['tags', 'transforms'])
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use an
`Estimator`.
"""
__metaclass__ = abc.ABCMeta
# Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be the same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
# pylint: enable=g-doc-exception
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
logging.info('Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This can implement
either iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at once, or when the model is taking a long time
to converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics, name)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to a
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
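# Illustrative sketch (hypothetical ops): given
#   eval_dict = {'accuracy': (acc_value_op, acc_update_op)}
# this helper returns a grouped update op containing acc_update_op, together
# with value_ops == {'accuracy': acc_value_op}.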
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps == 0:
logging.warning('evaluation steps are 0. If `input_fn` does not raise '
'`OutOfRangeError`, the evaluation will never stop. '
'Use steps=None if intended.')
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
training_util.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend(hooks)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
self._config.keep_checkpoint_every_n_hours),
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
* `mode`: Optional. Specifies if this is training, evaluation or
prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
is passed to Estimator in the `params` parameter. This allows
configuring Estimators from hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports the following signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not have a params '
'argument, but params (%s) were passed to the '
'Estimator\'s constructor.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
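# Illustrative sketch of a ModelFnOps-returning model_fn matching the
# signature documented in __init__ above (my_model_fn and its graph-building
# steps are hypothetical):
#
#   def my_model_fn(features, labels, mode, params):
#     predictions = ...  # build the inference graph from `features`
#     loss = ...         # may be None when mode == ModeKeys.INFER
#     train_op = ...     # only required when mode == ModeKeys.TRAIN
#     return model_fn_lib.ModelFnOps(
#         mode=mode, predictions=predictions, loss=loss, train_op=train_op)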
def _call_model_fn(self, features, labels, mode, metrics=None):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
metrics: Dict of metrics.
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
model_fn_ops = model_fn_results
else:
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
model_fn_ops = model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
return model_fn_ops
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL, metrics)
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None,
graph_rewrite_specs=(GraphRewriteSpec((tag_constants.SERVING,), ()),)):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
graph_rewrite_specs: an iterable of `GraphRewriteSpec`. Each element will
produce a separate MetaGraphDef within the exported SavedModel, tagged
and rewritten as specified. Defaults to a single entry using the
default serving tag ("serve") and no rewriting.
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
# We'll write the SavedModel to a temporary directory and then atomically
# rename it at the end. This helps to avoid corrupt / incomplete outputs,
# which could otherwise occur if the job is preempted or otherwise fails
# in the middle of SavedModel creation.
temp_export_dir = saved_model_export_utils.get_temp_export_dir(export_dir)
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
# Build the base graph
with ops.Graph().as_default() as g:
training_util.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
resources.initialize_resources(resources.shared_resources()),
lookup_ops.tables_initializer())
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
# Export the first MetaGraphDef with variables, assets etc.
with tf_session.Session('') as session:
# pylint: disable=protected-access
saveables = variables._all_saveable_objects()
# pylint: enable=protected-access
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
elif saveables:
saver_for_restore = saver.Saver(saveables, sharded=True)
saver_for_restore.restore(session, checkpoint_path)
# Perform the export
if not graph_rewrite_specs or graph_rewrite_specs[0].transforms:
raise ValueError('The first element of graph_rewrite_specs '
'must specify no transforms.')
untransformed_tags = graph_rewrite_specs[0].tags
# TODO(soergel): switch to main_op or otherwise update when dust settles
builder.add_meta_graph_and_variables(
session, untransformed_tags,
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
# pylint: disable=protected-access
base_meta_graph_def = builder._saved_model.meta_graphs[0]
# pylint: enable=protected-access
if graph_rewrite_specs[1:]:
# Prepare the input_names and output_names needed for the
# meta_graph_transform call below.
input_names = [tensor.name
for input_dict in input_alternatives.values()
for tensor in input_dict.values()]
output_names = [tensor.name
for output_alternative in output_alternatives.values()
for tensor in output_alternative[1].values()]
# Write the additional MetaGraphDefs
for graph_rewrite_spec in graph_rewrite_specs[1:]:
# TODO(soergel) consider moving most of this to saved_model.builder_impl
# as e.g. builder.add_rewritten_meta_graph(rewritten_graph_def, tags)
transformed_meta_graph_def = meta_graph_transform.meta_graph_transform(
base_meta_graph_def, input_names, output_names,
graph_rewrite_spec.transforms, graph_rewrite_spec.tags)
# pylint: disable=protected-access
meta_graph_def = builder._saved_model.meta_graphs.add()
# pylint: enable=protected-access
meta_graph_def.CopyFrom(transformed_meta_graph_def)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(temp_export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
builder.save(as_text)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None, name=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
| apache-2.0 |
rseubert/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
nrhine1/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
Monika319/EWEF-1 | Cw2Rezonans/Karolina/BodePlotMultifileCewkaIBez.py | 1 | 1402 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import os
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, FixedLocator
rc('font', family='Consolas')
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, FixedLocator
files = ["sim_zad4_r33.dat","sim_zad1_r30.dat"]
Dane=[]
for NazwaPliku in files:
Dane.append(np.loadtxt(NazwaPliku))
Xex=Dane[0][:, 0]
Yex=10**(Dane[0][:, 1]/20.)/33.
plt.plot(Xex, Yex, "-", label=u"Coil included")
Xsim=Dane[1][:, 0]
Ysim=10**(Dane[1][:, 1]/20.)/33.
plt.plot(Xsim, Ysim, "-", label=u"Without coil")
minx=np.round((min((min(Xsim), min(Xex)))/10000))*10000
maxx=np.round((max((max(Xsim), max(Xex)))/10000))*10000
xticks=np.linspace(minx, maxx, 6)
#plt.xscale('log')
#plt.yscale('log')
plt.xlim(minx,maxx)
plt.axes().xaxis.set_major_formatter(FormatStrFormatter("%d"))
##plt.axes().xaxis.set_minor_formatter(FormatStrFormatter("%d"))
##plt.axes().xaxis.set_minor_locator(FixedLocator(xticks))
plt.axes().xaxis.set_major_locator(FixedLocator(xticks))
Opis=u"Plot 4\nSeries RLC\n33 Ω resistor"
Nazwa=u"Z3W2CewkaBezCewki"
plt.title(Opis)
plt.xlabel(u"Frequency f [Hz]")
plt.ylabel(u"Current magnitude |I| [A]")
plt.grid(which='both')
plt.legend(loc="best")
plt.savefig(Nazwa +".png", bbox_inches='tight')
plt.show()
| gpl-2.0 |
dfolch/pysal | pysal/contrib/spint/tests/test_gravity.py | 8 | 7153 | """
Tests for gravity-style spatial interaction models
"""
__author__ = 'toshan'
import unittest
import numpy as np
import pandas as pd
import gravity as grav
class TestUnconstrained(unittest.TestCase):
"""Unconstrained class for unit tests"""
def setUp(self):
self.f = np.array([56, 100.8, 173.6, 235.2, 87.36,
28., 100.8, 69.44, 235.2, 145.6,
22., 26.4, 136.4, 123.2, 343.2,
14., 75.6, 130.2, 70.56, 163.8,
22, 59.4, 204.6, 110.88, 171.6])
self.V = np.repeat(np.array([56, 56, 44, 42, 66]), 5)
self.o = np.repeat(np.array(range(1, 6)), 5)
self.W = np.tile(np.array([10, 18, 62, 84, 78]), 5)
self.d = np.tile(np.array(range(1, 6)), 5)
self.dij = np.array([10, 10, 20, 20, 50,
20, 10, 50, 20, 30,
20, 30, 20, 30, 10,
30, 10, 20, 50, 20,
30, 20, 20, 50, 30])
self.dt = pd.DataFrame({'origins': self.o,
'destinations': self.d,
'V': self.V,
'W': self.W,
'Dij': self.dij,
'flows': self.f})
def test_Uconstrained(self):
model = grav.Unconstrained(self.dt, 'origins', 'destinations', 'flows',
['V'], ['W'], 'Dij', 'pow')
V = 1.0
W = 1.0
beta = -1.0
self.assertAlmostEqual(model.p['V'], V, delta=.0001)
self.assertAlmostEqual(model.p['W'], W, delta=.0001)
self.assertAlmostEqual(model.p['beta'], beta, delta=.0001)
class TestProductionConstrained(unittest.TestCase):
"""Production constrained class for unit tests"""
def setUp(self):
self.f = np.array([0, 6469, 7629, 20036, 4690,
6194, 11688, 2243, 8857, 7248,
3559, 9221, 10099, 22866, 3388,
9986, 46618, 11639, 1380, 5261,
5985, 6731, 2704, 12250, 16132])
self.o = np.repeat(1, 25)
self.d = np.array(range(1, 26))
self.dij = np.array([0, 576, 946, 597, 373,
559, 707, 1208, 602, 692,
681, 1934, 332, 595, 906,
425, 755, 672, 1587, 526,
484, 2141, 2182, 410, 540])
self.pop = np.array([1596000, 2071000, 3376000, 6978000, 1345000,
2064000, 2378000, 1239000, 4435000, 1999000,
1274000, 7042000, 834000, 1268000, 1965000,
1046000, 12131000, 4824000, 969000, 2401000,
2410000, 2847000, 1425000, 1089000, 2909000])
self.dt = pd.DataFrame({'origins': self.o,
'destinations': self.d,
'pop': self.pop,
'Dij': self.dij,
'flows': self.f})
def test_Production_Constrained(self):
model = grav.ProductionConstrained(self.dt, 'origins', 'destinations', 'flows',
['pop'], 'Dij', 'pow')
pop = 0.7818262
beta = -0.7365098
self.assertAlmostEqual(model.p['pop'], pop, delta=.0001)
self.assertAlmostEqual(model.p['beta'], beta, delta=.0001)
class TestAttractionConstrained(unittest.TestCase):
"""Attraction constrained class for unit tests"""
def setUp(self):
self.f = np.array([56, 100.8, 173.6, 235.2, 87.36,
28., 100.8, 69.44, 235.2, 145.6,
22., 26.4, 136.4, 123.2, 343.2,
14., 75.6, 130.2, 70.56, 163.8,
22, 59.4, 204.6, 110.88, 171.6])
self.V = np.repeat(np.array([56, 56, 44, 42, 66]), 5)
self.o = np.repeat(np.array(range(1, 6)), 5)
self.W = np.tile(np.array([10, 18, 62, 84, 78]), 5)
self.d = np.tile(np.array(range(1, 6)), 5)
self.dij = np.array([10, 10, 20, 20, 50,
20, 10, 50, 20, 30,
20, 30, 20, 30, 10,
30, 10, 20, 50, 20,
30, 20, 20, 50, 30])
self.dt = pd.DataFrame({'origins': self.o,
'destinations': self.d,
'V': self.V,
'Dij': self.dij,
'flows': self.f})
def test_Attraction_Constrained(self):
model = grav.AttractionConstrained(self.dt, 'origins', 'destinations', 'flows',
['V'], 'Dij', 'pow')
V = 1.0
beta = -1.0
self.assertAlmostEqual(model.p['V'], V, delta=.0001)
self.assertAlmostEqual(model.p['beta'], beta, delta=.0001)
class TestDoublyConstrained(unittest.TestCase):
"""Doubly constrained class for unit tests"""
def setUp(self):
self.f = np.array([0, 180048, 79223, 26887, 198144, 17995, 35563, 30528, 110792,
283049, 0, 300345, 67280, 718673, 55094, 93434, 87987, 268458,
87267, 237229, 0, 281791, 551483, 230788, 178517, 172711, 394481,
29877, 60681, 286580, 0, 143860, 49892, 185618, 181868, 274629,
130830, 382565, 346407, 92308, 0, 252189, 192223, 89389, 279739,
21434, 53772, 287340, 49828, 316650, 0, 141679, 27409, 87938,
30287, 64645, 161645, 144980, 199466, 121366, 0, 134229, 289880,
21450, 43749, 97808, 113683, 89806, 25574, 158006, 0, 437255,
72114, 133122, 229764, 165405, 266305, 66324, 252039, 342948, 0])
self.o = np.repeat(np.array(range(1, 10)), 9)
self.d = np.tile(np.array(range(1, 10)), 9)
self.dij = np.array([0, 219, 1009, 1514, 974, 1268, 1795, 2420, 3174,
219, 0, 831, 1336, 755, 1049, 1576, 2242, 2996,
1009, 831, 0, 505, 1019, 662, 933, 1451, 2205,
1514, 1336, 505, 0, 1370, 888, 654, 946, 1700,
974, 755, 1019, 1370, 0, 482, 1144, 2278, 2862,
1268, 1049, 662, 888, 482, 0, 662, 1795, 2380,
1795, 1576, 933, 654, 1144, 662, 0, 1287, 1779,
2420, 2242, 1451, 946, 2278, 1795, 1287, 0, 754,
3147, 2996, 2205, 1700, 2862, 2380, 1779, 754, 0])
self.dt = pd.DataFrame({'Origin': self.o,
'Destination': self.d,
'flows': self.f,
'Dij': self.dij})
def test_Doubly_Constrained(self):
model = grav.DoublyConstrained(self.dt, 'Origin', 'Destination', 'flows', 'Dij', 'exp')
beta = -0.0007369
self.assertAlmostEqual(model.p['beta'], beta, delta=.0000001)
if __name__ == '__main__':
unittest.main() | bsd-3-clause |
siutanwong/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 243 | 7461 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under the curve (AUC) of the
precision-recall curve.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
dednal/chromium.src | chrome/test/nacl_test_injection/buildbot_nacl_integration.py | 94 | 3083 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows but it is not
# clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
# run_breakpad_browser_process_crash_test is flaky.
# See http://crbug.com/317890
tests_to_disable.append('run_breakpad_browser_process_crash_test')
# See http://crbug.com/332301
tests_to_disable.append('run_breakpad_crash_in_syscall_test')
# It appears that crash_service.exe is not being reliably built by
# default in the CQ. See: http://crbug.com/380880
tests_to_disable.append('run_breakpad_untrusted_crash_test')
tests_to_disable.append('run_breakpad_trusted_crash_in_startup_test')
script_dir = os.path.dirname(os.path.abspath(__file__))
nacl_integration_script = os.path.join(script_dir,
'buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
shaneknapp/spark | python/pyspark/testing/pandasutils.py | 14 | 15816 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import shutil
import tempfile
import unittest
import warnings
from contextlib import contextmanager
from distutils.version import LooseVersion
import pandas as pd
from pandas.api.types import is_list_like
from pandas.core.dtypes.common import is_numeric_dtype
from pandas.testing import assert_frame_equal, assert_index_equal, assert_series_equal
from pyspark import pandas as ps
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.indexes import Index
from pyspark.pandas.series import Series
from pyspark.pandas.utils import default_session, SPARK_CONF_ARROW_ENABLED
from pyspark.testing.sqlutils import SQLTestUtils
tabulate_requirement_message = None
try:
from tabulate import tabulate # noqa: F401
except ImportError as e:
# If tabulate requirement is not satisfied, skip related tests.
tabulate_requirement_message = str(e)
have_tabulate = tabulate_requirement_message is None
matplotlib_requirement_message = None
try:
import matplotlib # type: ignore # noqa: F401
except ImportError as e:
# If matplotlib requirement is not satisfied, skip related tests.
matplotlib_requirement_message = str(e)
have_matplotlib = matplotlib_requirement_message is None
plotly_requirement_message = None
try:
import plotly # type: ignore # noqa: F401
except ImportError as e:
# If plotly requirement is not satisfied, skip related tests.
plotly_requirement_message = str(e)
have_plotly = plotly_requirement_message is None
class PandasOnSparkTestCase(unittest.TestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
cls.spark = default_session()
cls.spark.conf.set(SPARK_CONF_ARROW_ENABLED, True)
@classmethod
def tearDownClass(cls):
# We don't stop Spark session to reuse across all tests.
# The Spark session will be started and stopped at PyTest session level.
# Please see pyspark/pandas/conftest.py.
pass
def assertPandasEqual(self, left, right, check_exact=True):
if isinstance(left, pd.DataFrame) and isinstance(right, pd.DataFrame):
try:
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
kwargs = dict(check_freq=False)
else:
kwargs = dict()
if LooseVersion(pd.__version__) < LooseVersion("1.1.1"):
# Due to https://github.com/pandas-dev/pandas/issues/35446
check_exact = check_exact \
and all([is_numeric_dtype(dtype) for dtype in left.dtypes]) \
and all([is_numeric_dtype(dtype) for dtype in right.dtypes])
assert_frame_equal(
left,
right,
check_index_type=("equiv" if len(left.index) > 0 else False),
check_column_type=("equiv" if len(left.columns) > 0 else False),
check_exact=check_exact,
**kwargs
)
except AssertionError as e:
msg = (
str(e)
+ "\n\nLeft:\n%s\n%s" % (left, left.dtypes)
+ "\n\nRight:\n%s\n%s" % (right, right.dtypes)
)
raise AssertionError(msg) from e
elif isinstance(left, pd.Series) and isinstance(right, pd.Series):
try:
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
kwargs = dict(check_freq=False)
else:
kwargs = dict()
if LooseVersion(pd.__version__) < LooseVersion("1.1.1"):
# Due to https://github.com/pandas-dev/pandas/issues/35446
check_exact = check_exact \
and is_numeric_dtype(left.dtype) \
and is_numeric_dtype(right.dtype)
assert_series_equal(
left,
right,
check_index_type=("equiv" if len(left.index) > 0 else False),
check_exact=check_exact,
**kwargs
)
except AssertionError as e:
msg = (
str(e)
+ "\n\nLeft:\n%s\n%s" % (left, left.dtype)
+ "\n\nRight:\n%s\n%s" % (right, right.dtype)
)
raise AssertionError(msg) from e
elif isinstance(left, pd.Index) and isinstance(right, pd.Index):
try:
if LooseVersion(pd.__version__) < LooseVersion("1.1.1"):
# Due to https://github.com/pandas-dev/pandas/issues/35446
check_exact = check_exact \
and is_numeric_dtype(left.dtype) \
and is_numeric_dtype(right.dtype)
assert_index_equal(left, right, check_exact=check_exact)
except AssertionError as e:
msg = (
str(e)
+ "\n\nLeft:\n%s\n%s" % (left, left.dtype)
+ "\n\nRight:\n%s\n%s" % (right, right.dtype)
)
raise AssertionError(msg) from e
else:
raise ValueError("Unexpected values: (%s, %s)" % (left, right))
def assertPandasAlmostEqual(self, left, right):
"""
        This function checks whether the given pandas objects are approximately
        equal, which means the conditions below:
          - Both objects may contain missing values
          - Floats are compared rounded to 7 decimal places, after
            dropping missing values (NaN, NaT, None)
"""
if isinstance(left, pd.DataFrame) and isinstance(right, pd.DataFrame):
msg = (
"DataFrames are not almost equal: "
+ "\n\nLeft:\n%s\n%s" % (left, left.dtypes)
+ "\n\nRight:\n%s\n%s" % (right, right.dtypes)
)
self.assertEqual(left.shape, right.shape, msg=msg)
for lcol, rcol in zip(left.columns, right.columns):
self.assertEqual(lcol, rcol, msg=msg)
for lnull, rnull in zip(left[lcol].isnull(), right[rcol].isnull()):
self.assertEqual(lnull, rnull, msg=msg)
for lval, rval in zip(left[lcol].dropna(), right[rcol].dropna()):
self.assertAlmostEqual(lval, rval, msg=msg)
self.assertEqual(left.columns.names, right.columns.names, msg=msg)
elif isinstance(left, pd.Series) and isinstance(right, pd.Series):
msg = (
"Series are not almost equal: "
+ "\n\nLeft:\n%s\n%s" % (left, left.dtype)
+ "\n\nRight:\n%s\n%s" % (right, right.dtype)
)
self.assertEqual(left.name, right.name, msg=msg)
self.assertEqual(len(left), len(right), msg=msg)
for lnull, rnull in zip(left.isnull(), right.isnull()):
self.assertEqual(lnull, rnull, msg=msg)
for lval, rval in zip(left.dropna(), right.dropna()):
self.assertAlmostEqual(lval, rval, msg=msg)
elif isinstance(left, pd.MultiIndex) and isinstance(right, pd.MultiIndex):
msg = (
"MultiIndices are not almost equal: "
+ "\n\nLeft:\n%s\n%s" % (left, left.dtype)
+ "\n\nRight:\n%s\n%s" % (right, right.dtype)
)
self.assertEqual(len(left), len(right), msg=msg)
for lval, rval in zip(left, right):
self.assertAlmostEqual(lval, rval, msg=msg)
elif isinstance(left, pd.Index) and isinstance(right, pd.Index):
msg = (
"Indices are not almost equal: "
+ "\n\nLeft:\n%s\n%s" % (left, left.dtype)
+ "\n\nRight:\n%s\n%s" % (right, right.dtype)
)
self.assertEqual(len(left), len(right), msg=msg)
for lnull, rnull in zip(left.isnull(), right.isnull()):
self.assertEqual(lnull, rnull, msg=msg)
for lval, rval in zip(left.dropna(), right.dropna()):
self.assertAlmostEqual(lval, rval, msg=msg)
else:
raise ValueError("Unexpected values: (%s, %s)" % (left, right))
def assert_eq(self, left, right, check_exact=True, almost=False):
"""
        Asserts whether two arbitrary objects are equal or not. If the given objects are
        Koalas (pandas-on-Spark) DataFrames or Series, they are converted to pandas and compared.
:param left: object to compare
:param right: object to compare
:param check_exact: if this is False, the comparison is done less precisely.
:param almost: if this is enabled, the comparison is delegated to `unittest`'s
`assertAlmostEqual`. See its documentation for more details.
"""
lobj = self._to_pandas(left)
robj = self._to_pandas(right)
if isinstance(lobj, (pd.DataFrame, pd.Series, pd.Index)):
if almost:
self.assertPandasAlmostEqual(lobj, robj)
else:
self.assertPandasEqual(lobj, robj, check_exact=check_exact)
elif is_list_like(lobj) and is_list_like(robj):
self.assertTrue(len(left) == len(right))
for litem, ritem in zip(left, right):
self.assert_eq(litem, ritem, check_exact=check_exact, almost=almost)
elif (lobj is not None and pd.isna(lobj)) and (robj is not None and pd.isna(robj)):
pass
else:
if almost:
self.assertAlmostEqual(lobj, robj)
else:
self.assertEqual(lobj, robj)
@staticmethod
def _to_pandas(obj):
if isinstance(obj, (DataFrame, Series, Index)):
return obj.to_pandas()
else:
return obj
class TestUtils(object):
@contextmanager
def temp_dir(self):
tmp = tempfile.mkdtemp()
try:
yield tmp
finally:
shutil.rmtree(tmp)
@contextmanager
def temp_file(self):
with self.temp_dir() as tmp:
yield tempfile.mktemp(dir=tmp)
class ComparisonTestBase(PandasOnSparkTestCase):
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def pdf(self):
return self.psdf.to_pandas()
def compare_both(f=None, almost=True):
if f is None:
return functools.partial(compare_both, almost=almost)
elif isinstance(f, bool):
return functools.partial(compare_both, almost=f)
@functools.wraps(f)
def wrapped(self):
if almost:
compare = self.assertPandasAlmostEqual
else:
compare = self.assertPandasEqual
for result_pandas, result_spark in zip(f(self, self.pdf), f(self, self.psdf)):
compare(result_pandas, result_spark.to_pandas())
return wrapped
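# Illustrative use of compare_both (hypothetical test method): the wrapped
# function is called once with the pandas frame and once with the
# pandas-on-Spark frame, and each yielded pair of results is compared.
#
#   class MyTests(ComparisonTestBase):
#       @compare_both(almost=True)
#       def test_sum(self, df):
#           yield df.sum()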
@contextmanager
def assert_produces_warning(
expected_warning=Warning,
filter_level="always",
check_stacklevel=True,
raise_on_extra_warnings=True,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Notes
-----
Replicated from pandas/_testing/_warnings.py.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of Exception raised. ``exception.Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
check_stacklevel : bool, default True
If True, displays the line that called the function containing
the warning to show were the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False): # doctest: +SKIP
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning): # doctest: +SKIP
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'
    .. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if expected_warning and issubclass(actual_warning.category, expected_warning):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = (
"Warning not set with correct stacklevel. ",
"File where warning is raised: {} != ".format(actual_warning.filename),
"{}. Warning message: {}".format(caller.filename, actual_warning.message),
)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
msg = "Did not see expected warning of class {}".format(repr(expected_warning.__name__))
assert saw_warning, msg
if raise_on_extra_warnings and extra_warnings:
raise AssertionError("Caused unexpected warning(s): {}".format(repr(extra_warnings)))
| apache-2.0 |
ocelot-collab/ocelot | demos/ebeam/linac_orb_correction_micado.py | 1 | 2964 | """
Linac Orbit Correction.
S.Tomin. 09.2019
"""
from ocelot import *
from ocelot.gui.accelerator import *
import dogleg_lattice as dl
from ocelot.cpbd.orbit_correction import *
from ocelot.cpbd.response_matrix import *
import seaborn as sns
import logging
#logging.basicConfig(level=logging.INFO)
method = MethodTM()
method.global_method = SecondTM
# introduce misalignment
dl.qi_77_i1.dx = -100e-6
dl.qi_77_i1.dy = 100e-6
dl.qi_85_i1.dx = 100e-6
dl.qi_85_i1.dy = -100e-6
lat = MagneticLattice(dl.cell, method=method)
tws = twiss(lat, tws0=dl.tws0)
#plot_opt_func(lat, tws, top_plot=["Dy"])
#plt.show()
orb = Orbit(lat)
orb.orbit_solver = MICADO(epsilon_x=0.001, epsilon_y=0.001, epsilon_ksi=1e-5)
# orb.orbit_solver = OrbitSVD(epsilon_x=0.001, epsilon_y=0.001)
method = LinacRmatrixRM(lattice=orb.lat, hcors=orb.hcors, vcors=orb.vcors, bpms=orb.bpms)
#drm_method = LinacDisperseSimRM
orb.response_matrix = ResponseMatrix(method=method)
# in that case the initial Twiss is needed only for initial energy
orb.response_matrix.calculate(tw_init=dl.tws0)
ax = sns.heatmap(orb.response_matrix.df, annot=True)
ax.set_title("Orbit response matrix")
plt.show()
s_bpm_b = np.array([p.s for p in orb.bpms])
x_bpm_b, y_bpm_b = method.read_virtual_orbit(p_init=Particle())
fig, ax = plot_API(lat)
ax.plot(s_bpm_b, x_bpm_b*1000., "ro-", label="X [mm]")
ax.plot(s_bpm_b, y_bpm_b*1000., "bo-", label="Y [mm]")
ax.legend()
plt.show()
orb.correction(beta=0)
vcors_angle = np.array([cor.angle for cor in orb.vcors])
hcors_angle = np.array([cor.angle for cor in orb.hcors])
x_bpm, y_bpm = method.read_virtual_orbit(p_init=Particle())
print(f"{orb.orbit_solver.__class__.__name__} method: ||HVcor|| = {np.sqrt(np.sum(hcors_angle * hcors_angle) + np.sum(vcors_angle * vcors_angle))}")
print(f"{orb.orbit_solver.__class__.__name__} method: ||XY_bpm|| = {np.sqrt(np.sum(x_bpm * x_bpm) + np.sum(y_bpm * y_bpm))}")
print(f"OrbitSVD method: ||HVcor|| = {0.000206}")
print(f"OrbitSVD method: ||XY_bpm|| = {0.0004769}")
p_list = lattice_track(lat, method.particle0)
s = [p.s for p in p_list]
x = [p.x*1000. for p in p_list]
y = [p.y*1000. for p in p_list]
print("result2 = ", np.sqrt(np.sum(x_bpm**2) + np.sum(y_bpm**2)))
fig, ax = plot_API(lat)
ax.plot(s_bpm_b, x_bpm*1000., "ro" , label="X [mm]")
ax.plot(s, x, 'r')
ax.plot(s_bpm_b, y_bpm*1000., "bo", label="Y [mm]")
ax.plot(s, y, 'b')
plt.show()
rm = orb.response_matrix.extract(cor_list=['CIX.78.I1', 'CIY.80.I1'], bpm_list=['BPMA.85.I1', 'BPMA.87.I1'])
print(rm)
rm[0, 0] = 2
rm[2, 0] = 0.1
rm[1, 1] = -0.1
print(rm)
orb.response_matrix.inject(cor_list=['CIX.78.I1', 'CIY.80.I1'], bpm_list=['BPMA.85.I1', 'BPMA.87.I1'], inj_matrix=rm)
rm_check = orb.response_matrix.extract(cor_list=['CIX.78.I1', 'CIY.80.I1'], bpm_list=['BPMA.85.I1', 'BPMA.87.I1'])
print("is RMs equal: ", (np.equal(rm, rm_check)).all())
ax = sns.heatmap(orb.response_matrix.df, annot=True)
ax.set_title("Orbit response matrix")
plt.show()
| gpl-3.0 |
Jokiva/Computational-Physics | lecture 6/Problem 3.py | 1 | 3250 | import numpy as np
import matplotlib.pyplot as plt
# codes for brent solver
def brent_solver(f, xs, epsilon=1e-9, max_it=500):
# check if invalid data is inputed
if len(xs) != 2:
raise ValueError("xs must contain two initial value bracketing a root")
elif (f(xs[0]) * f(xs[1]) > 0):
raise ValueError("the two initial values must braceking a root")
# if data is valid, make a copy of it
# also initialization
a = xs[0]
b = xs[1]
c = a
fa = f(a)
fb = f(b)
fc = fa
e = d = b - a
p = q = s = 0
cnt = 0
# start interation
    # the sentinel marks whether we continue the iteration,
    # based on the accuracy reached and the number of iteration steps
conti_iter = True
while conti_iter:
# if c is a better approximation than b
# exchange the value
if np.abs(fc) < np.abs(fb):
a = b
b = c
c = a
fa = fb
fb = fc
fc = fa
# calculate the midpoint relative to b
xm = 0.5 * (c - b)
# check if the midpoint can be taken as a root
if (np.abs(xm) < epsilon) or (fb == 0):
conti_iter = False
# check step of iteration
if cnt > max_it:
conti_iter = False
# use bisection if the previous step width is too small
# or the last step did not improve
if (np.abs(e) < epsilon) or (np.abs(fa) <= np.abs(fb)):
e = d = xm
# otherwise we will use the interpolation method
else:
# only secant is proper
if a == c:
p = 2 * xm * fb / fa
q = (fb - fa) / fa
# muller is proper
else:
p = 2 * xm * fb * (fa - fb) / fc / fc - (b - a) * fb * (fb - fc) / (fa * fc)
q = (fa / fc - 1) * (fb / fc - 1) * (fb / fa - 1)
# make p positive
if p > 0:
q = -q
else:
p = -p
# update previous step size
s = e
e = d
# use interpolation if possible
# or we use bisection
if (2 * p < 3 * xm * q - np.abs(epsilon * q)) and (p < np.abs(0.5 * s * q)):
d = p / q
else:
e = d = xm
a = b
fa = fb
if np.abs(d) > epsilon:
b = b + d
else:
b = b + np.sign(xm) * epsilon
        # calculate new function value
fb = f(b)
# be sure to bracket the root
if fb * fc > 0:
c = a
fc = fa
e = b - a
d = e
cnt += 1
return [b, cnt]
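# A quick illustrative check (not part of the original script): the solver
# should locate the root of a simple bracketed function such as cos(x) - x
# on [0, 1], whose root is approximately 0.7390851.
#
#   root, steps = brent_solver(lambda t: np.cos(t) - t, [0.0, 1.0])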
# sixth-order Legendre polynomial
def f(x):
return 924 * x ** 6 - 2772 * x ** 5 + 3150 * x ** 4 - 1680 * x ** 3 + 420 * x ** 2 - 42 * x + 1
# sketch it
x = np.linspace(0, 1, num=1000)
plt.figure()
plt.plot(x, f(x))
plt.grid()
plt.show()
del x
# global root finding: scan consecutive subintervals of [0, 1] for sign changes
x = np.linspace(0, 1)
x = list(x)
for i in range(len(x) - 1):
try:
root, step = brent_solver(f, [x[i], x[i+1]])
print(root)
except ValueError:
        pass  # no sign change in this subinterval, so no bracketed root | gpl-3.0 |
vshtanko/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
GoogleCloudPlatform/ai-platform-samples | training/sklearn/structured/custom_routines/trainer/my_pipeline.py | 1 | 1401 | import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
# A transformer to select a subset of features, given their positional indices
class PositionalSelector(BaseEstimator, TransformerMixin):
def __init__(self, positions):
self.positions = positions
def fit(self, X, y=None):
return self
def transform(self, X):
return np.array(X)[:, self.positions]
class StripString(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
strip = np.vectorize(str.strip)
return strip(np.array(X))
# A simple one hot encoder for scikit-learn
class SimpleOneHotEncoder(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
self.values = []
for c in range(X.shape[1]):
Y = X[:, c]
values = {v: i for i, v in enumerate(np.unique(Y))}
self.values.append(values)
return self
def transform(self, X):
X = np.array(X)
matrices = []
for c in range(X.shape[1]):
Y = X[:, c]
mat = np.zeros(shape=(len(Y), len(self.values[c])), dtype=np.int8)
for i, x in enumerate(Y):
if x in self.values[c]:
mat[i][self.values[c][x]] = 1
matrices.append(mat)
res = np.concatenate(matrices, axis=1)
return res
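# A minimal sketch (illustrative only; the column indices and `X_raw` are
# hypothetical) of how these transformers could be chained in a
# scikit-learn Pipeline:
#
#   from sklearn.pipeline import Pipeline
#   categorical = Pipeline([
#       ('select', PositionalSelector([1, 3])),
#       ('strip', StripString()),
#       ('onehot', SimpleOneHotEncoder()),
#   ])
#   X_encoded = categorical.fit_transform(X_raw)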
| apache-2.0 |
zorojean/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 17 | 24947 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although thus second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
            # gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a value error is raised when the lengths of X and Y should not
# differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1)
Y_norm_sq = (Y ** 2).sum(axis=1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
manulera/ModellingCourse | ReAct/Python/Example_Repressilator_Decay.py | 1 | 3889 |
import numpy as np
from Gilles import *
import matplotlib.pyplot as plt
from ColorLine import *
from mpl_toolkits.mplot3d import Axes3D
# Initial conditions
user_input = ['TetR_site', 1,
'TetR_site_b', 0,
'TetR_mRNA', 0,
'TetR_Prot', 0,
'TetR_Prot2', 0,
'LacI_site', 0,
'LacI_site_b', 1,
'LacI_mRNA', 0,
'LacI_Prot', 0,
'LacI_Prot2', 0,
'Gammacl_site', 0,
'Gammacl_site_b', 1,
'Gammacl_mRNA', 0,
'Gammacl_Prot', 0,
'Gammacl_Prot2', 0,
'GFP_site', 0,
'GFP_site_b', 1,
'GFP_mRNA', 0,
'GFP_Prot', 0
]
# Reaction template ((stoch_1,reactant_1,stoch_2,reactant_2),(stoch_1,product_1,stoch_2,product_2),k)
k = (100, 50, 40, 20, 3, 1, 0, 30, 10)
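# For example, the first entry below, (-1,'TetR_site'), (1, 'TetR_mRNA'), k[0],
# reads as: transcription from 'TetR_site' produces one 'TetR_mRNA' at rate k[0].
# The negative stoichiometry appears to mark a species that takes part without
# being consumed (a convention assumed here from how the sites persist in this model).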
reactions = (
(-1,'TetR_site'), (1, 'TetR_mRNA'), k[0] , # mRNA transcription
(1,'TetR_mRNA'),(), k[1], # mRNA degradation
(-1, 'TetR_mRNA'), (1, 'TetR_Prot'), k[2], # Translation
(1, 'TetR_Prot'), (), k[3], # Protein degradation
(2, 'TetR_Prot'), (1, 'TetR_Prot2'), k[4],
(1, 'TetR_Prot2'), (2, 'TetR_Prot'), k[5],
(1, 'TetR_Prot2'), (), k[6],
(1, 'TetR_Prot2',1,'Gammacl_site'), (1,'Gammacl_site_b'), k[7], # Binding of the repressor
(1, 'Gammacl_site_b'), (1, 'TetR_Prot2',1,'Gammacl_site'), k[8], # Unbinding of the repressor
# ------------------------------------------------------------------------------------------------------------------
(-1, 'Gammacl_site'), (1, 'Gammacl_mRNA'), k[0], # mRNA transcription
(1, 'Gammacl_mRNA'), (), k[1], # mRNA degradation
(-1, 'Gammacl_mRNA'), (1, 'Gammacl_Prot'), k[2], # Translation
(1, 'Gammacl_Prot'), (), k[3], # Protein degradation
(2, 'Gammacl_Prot'), (1, 'Gammacl_Prot2'), k[4],
(1, 'Gammacl_Prot2'), (2, 'Gammacl_Prot'), k[5],
(1, 'Gammacl_Prot2'), (), k[6],
(1, 'Gammacl_Prot2', 1, 'LacI_site'), (1, 'LacI_site_b'), k[7], # Binding of the repressor
(1, 'LacI_site_b'), (1, 'Gammacl_Prot2', 1, 'LacI_site'), k[8], # Unbinding of the repressor
# ------------------------------------------------------------------------------------------------------------------
(-1, 'LacI_site'), (1, 'LacI_mRNA'), k[0], # mRNA transcription
(1, 'LacI_mRNA'), (), k[1], # mRNA degradation
(-1, 'LacI_mRNA'), (1, 'LacI_Prot'), k[2], # Translation
(1, 'LacI_Prot'), (), k[3], # Protein degradation
(2, 'LacI_Prot'), (1, 'LacI_Prot2'), k[4],
(1, 'LacI_Prot2'), (2, 'LacI_Prot'), k[5],
(1, 'LacI_Prot2'), (), k[6],
(1, 'LacI_Prot2', 1, 'TetR_site'), (1, 'TetR_site_b'), k[7], # Binding of the repressor
(1, 'TetR_site_b'), (1, 'LacI_Prot2', 1, 'TetR_site'), k[8], # Unbinding of the repressor
# ------------------------------------------------------------------------------------------------------------------
(-1, 'GFP_site'), (1, 'GFP_mRNA'), k[0]*3, # mRNA transcription
(1, 'GFP_mRNA'), (), k[1], # mRNA degradation
(-1, 'GFP_mRNA'), (1, 'GFP_Prot'), k[2], # Translation
(1, 'GFP_Prot'), (), k[3], # Protein degradation
(1, 'TetR_Prot2',1,'GFP_site'), (1,'GFP_site_b'), k[4], # Binding of the repressor
(1, 'GFP_site_b'), (1, 'TetR_Prot2',1,'GFP_site'), k[5], # Unbinding of the repressor
)
# dt is the time step used for the deterministic calculation; t below is the time grid handed to ReAct
dt=0.01
t = np.arange(0, 80, dt)
(solution,(tgill, valsgill, _, _),rows,mode)=ReAct(user_input,reactions,t,mode=1)
Gillesplot(solution,t,tgill, valsgill,rows,mode,which2plot=['TetR_Prot','Gammacl_Prot','LacI_Prot','GFP_Prot'])
fig = plt.figure()
ax = fig.gca(projection='3d')
plt.plot(solution[:,rows['TetR_Prot']],solution[:,rows['Gammacl_Prot']],solution[:,rows['LacI_Prot']])
plt.show() | gpl-3.0 |
weinbe58/QuSpin | docs/downloads/a4ca601309f5f844f14dda93534620e3/example6.py | 3 | 5597 | from __future__ import print_function, division
import sys,os
# line 4 and line 5 below are for development purposes and can be removed
qspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,qspin_path)
from quspin.operators import hamiltonian,exp_op,quantum_operator # operators
from quspin.basis import spinful_fermion_basis_1d # Hilbert space basis
from quspin.tools.measurements import obs_vs_time # calculating dynamics
import numpy as np # general math functions
from numpy.random import uniform,choice # tools for doing random sampling
from time import time # tool for calculating computation time
import matplotlib.pyplot as plt # plotting library
#####################################################################
# example 6 #
# In this script we demonstrate how to use QuSpin's to create #
# a disordered Fermi-Hubbard model with a parameter-dependent #
# Hamiltonian, and measure the imbalance on different lattice #
# sites (see arXiv:1501.05661). We also show how to prepare #
# fermion Fock states, and do disorder averaging. #
#####################################################################
##### setting parameters for simulation
# simulation parameters
n_real = 100 # number of realizations
n_boot = 100 # number of bootstrap samples to calculate error
# physical parameters
L = 8 # system size
N = L//2 # number of particles
N_up = N//2 + N % 2 # number of fermions with spin up
N_down = N//2 # number of fermions with spin down
w_list = [1.0,4.0,10.0] # disorder strength
J = 1.0 # hopping strength
U = 5.0 # interaction strength
# range in time to evolve system
start,stop,num=0.0,35.0,101
t = np.linspace(start,stop,num=num,endpoint=True)
#
###### create the basis
# build spinful fermions basis
basis = spinful_fermion_basis_1d(L,Nf=(N_up,N_down))
#
##### create model
# define site-coupling lists
hop_right = [[-J,i,i+1] for i in range(L-1)] # hopping to the right OBC
hop_left = [[J,i,i+1] for i in range(L-1)] # hopping to the left OBC
int_list = [[U,i,i] for i in range(L)] # onsite interaction
# site-coupling list to create the sublattice imbalance observable
sublat_list = [[(-1.0)**i/N,i] for i in range(0,L)]
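# i.e. the imbalance I = (1/N) * sum_i (-1)**i * n_i, summed over both spin
# species via the two operator strings below; roughly speaking it stays close to
# its initial value in the localized phase and decays toward zero when the
# system thermalizes.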
# create static lists
operator_list_0 = [
["+-|", hop_left], # up hop left
["-+|", hop_right], # up hop right
["|+-", hop_left], # down hop left
["|-+", hop_right], # down hop right
["n|n", int_list], # onsite interaction
]
imbalance_list = [["n|",sublat_list],["|n",sublat_list]]
# create operator dictionary for quantum_operator class
# add key for Hubbard hamiltonian
operator_dict=dict(H0=operator_list_0)
# add keys for local potential in each site
for i in range(L):
    # add keys n0,n1,...,n(L-1) to the dictionary, each holding a local potential operator
operator_dict["n"+str(i)] = [["n|",[[1.0,i]]],["|n",[[1.0,i]]]]
#
###### setting up operators
# set up hamiltonian dictionary and observable (imbalance I)
no_checks = dict(check_pcon=False,check_symm=False,check_herm=False)
H_dict = quantum_operator(operator_dict,basis=basis,**no_checks)
I = hamiltonian(imbalance_list,[],basis=basis,**no_checks)
# strings which represent the initial state
s_up = "".join("1000" for i in range(N_up))
s_down = "".join("0010" for i in range(N_down))
# basis.index accepts strings and returns the index
# which corresponds to that state in the basis list
i_0 = basis.index(s_up,s_down) # find index of product state
psi_0 = np.zeros(basis.Ns) # allocate space for state
psi_0[i_0] = 1.0 # set MB state to be the given product state
print("H-space size: {:d}, initial state: |{:s}>(x)|{:s}>".format(basis.Ns,s_up,s_down))
#
# define function to do dynamics for different disorder realizations.
def real(H_dict,I,psi_0,w,t,i):
# body of function goes below
ti = time() # start timing function for duration of reach realisation
# create a parameter list which specifies the onsite potential with disorder
params_dict=dict(H0=1.0)
for j in range(L):
params_dict["n"+str(j)] = uniform(-w,w)
# using the parameters dictionary construct a hamiltonian object with those
# parameters defined in the list
H = H_dict.tohamiltonian(params_dict)
# use exp_op to get the evolution operator
U = exp_op(H,a=-1j,start=t.min(),stop=t.max(),num=len(t),iterate=True)
psi_t = U.dot(psi_0) # get generator psi_t for time evolved state
# use obs_vs_time to evaluate the dynamics
t = U.grid # extract time grid stored in U, and defined in exp_op
obs_t = obs_vs_time(psi_t,t,dict(I=I))
# print reporting the computation time for realization
print("realization {}/{} completed in {:.2f} s".format(i+1,n_real,time()-ti))
# return observable values
return obs_t["I"]
#
###### looping over different disorder strengths
for w in w_list:
I_data = np.vstack([real(H_dict,I,psi_0,w,t,i) for i in range(n_real)])
##### averaging and error estimation
I_avg = I_data.mean(axis=0) # get mean value of I for all time points
# generate bootstrap samples
bootstrap_gen = (I_data[choice(n_real,size=n_real)].mean(axis=0) for i in range(n_boot))
# generate the fluctuations about the mean of I
sq_fluc_gen = ((bootstrap-I_avg)**2 for bootstrap in bootstrap_gen)
I_error = np.sqrt(sum(sq_fluc_gen)/n_boot)
##### plotting results
plt.errorbar(t,I_avg,I_error,marker=".",label="w={:.2f}".format(w))
# configuring plots
plt.xlabel("$Jt$",fontsize=18)
plt.ylabel("$\\mathcal{I}$",fontsize=18)
plt.grid(True)
plt.tick_params(labelsize=16)
plt.legend(loc=0)
plt.tight_layout()
plt.savefig('fermion_MBL.pdf', bbox_inches='tight')
#plt.show()
plt.close() | bsd-3-clause |
MPBA/pyHRV | setup.py | 1 | 1904 | # coding=utf-8
from setuptools import setup
setup(
name='pyphysio',
packages=['pyphysio',
'pyphysio.estimators',
'pyphysio.filters',
'pyphysio.indicators',
'pyphysio.segmentation',
'pyphysio.tools',
'pyphysio.tests',
'pyphysio.sqi',
],
package_data={'pyphysio.tests': ['data/*']},
version='2.1',
description='Python library for physiological signals analysis (IBI & HRV, ECG, BVP, EDA, RESP...)',
author='MPBA FBK',
author_email='[email protected]',
url='https://github.com/MPBA/pyphysio',
keywords=['eda', 'gsr', 'ecg', 'bvp', 'signal', 'analysis', 'physiological', 'pyhrv', 'hrv'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
install_requires=[
'numpy',
'scipy',
'matplotlib'
],
requires=[
'pytest',
],
)
print("")
print("")
print("")
print("----------------------------------")
print("| |")
print("| Thanks for using 'pyphysio'! |")
print("| |")
print("----------------------------------")
print("")
print("Remember to cite pyphysio in your publications:")
print("Bizzego et al. (2019) 'pyphysio: A physiological signal processing library for data science approaches in physiology', SoftwareX")
print("")
print("----------------------------------")
| gpl-3.0 |
aminert/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
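# Xfull is a (100 * 100, 2) array: every grid point becomes one row of
# (feature 1, feature 2) on which predict_proba is evaluated below.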
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/indexers.py | 2 | 6390 | """
Low-dependency indexing utilities.
"""
import numpy as np
from pandas.core.dtypes.common import is_list_like
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
# -----------------------------------------------------------
# Indexer Identification
def is_list_like_indexer(key) -> bool:
"""
Check if we have a list-like indexer that is *not* a NamedTuple.
Parameters
----------
key : object
Returns
-------
bool
"""
# allow a list_like, but exclude NamedTuples which can be indexers
return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple)
def is_scalar_indexer(indexer, arr_value) -> bool:
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool:
"""
Check if we have an empty indexer.
Parameters
----------
indexer : object
arr_value : np.ndarray
Returns
-------
bool
"""
if is_list_like(indexer) and not len(indexer):
return True
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# -----------------------------------------------------------
# Indexer Validation
def check_setitem_lengths(indexer, value, values) -> None:
"""
Validate that value and indexer are the same length.
    A special case is allowed when the indexer is a boolean array
and the number of true values equals the length of ``value``. In
this case, no exception is raised.
Parameters
----------
indexer : sequence
The key for the setitem
value : array-like
The value for the setitem
values : array-like
The values being set into
Returns
-------
None
Raises
------
ValueError
When the indexer is an ndarray or list and the lengths don't
match.
"""
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (
isinstance(indexer, np.ndarray)
and indexer.dtype == np.bool_
and len(indexer[indexer]) == len(value)
):
raise ValueError(
"cannot set using a list-like indexer "
"with a different length than the value"
)
elif isinstance(indexer, slice):
# slice
if is_list_like(value) and len(values):
if len(value) != length_of_indexer(indexer, values):
raise ValueError(
"cannot set using a slice indexer with a "
"different length than the value"
)
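# Illustrative sketch of the boolean special case above (values are
# hypothetical): a boolean indexer with two True entries may be assigned a
# length-2 value even though len(indexer) == 3, e.g.
# >>> check_setitem_lengths(np.array([True, False, True]), [10, 20], np.arange(3))
# passes silently, while a plain list indexer of length 3 paired with a
# length-2 value raises ValueError.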
def validate_indices(indices: np.ndarray, n: int) -> None:
"""
Perform bounds-checking for an indexer.
-1 is allowed for indicating missing values.
Parameters
----------
indices : ndarray
n : int
length of the array being indexed
Raises
------
ValueError
Examples
--------
>>> validate_indices([1, 2], 3)
# OK
>>> validate_indices([1, -2], 3)
ValueError
>>> validate_indices([1, 2, 3], 3)
IndexError
>>> validate_indices([-1, -1], 0)
# OK
>>> validate_indices([0, 1], 0)
IndexError
"""
if len(indices):
min_idx = indices.min()
if min_idx < -1:
msg = "'indices' contains values less than allowed ({} < {})".format(
min_idx, -1
)
raise ValueError(msg)
max_idx = indices.max()
if max_idx >= n:
raise IndexError("indices are out-of-bounds")
# -----------------------------------------------------------
# Indexer Conversion
def maybe_convert_indices(indices, n: int):
"""
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
The array of indices that we are to convert.
n : int
The number of elements in the array that we are indexing.
Returns
-------
valid_indices : array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError : one of the converted indices either exceeded the number
of elements (specified by `n`) OR was still negative.
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If list is empty, np.array will return float and cause indexing
# errors.
return np.empty(0, dtype=np.intp)
mask = indices < 0
if mask.any():
indices = indices.copy()
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
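# Usage sketch (doctest-style, values are illustrative):
# >>> maybe_convert_indices(np.array([0, -1, 2]), n=5)
# array([0, 4, 2])
# The -1 is shifted by n to point at the last element; anything still negative
# or >= n after the shift raises IndexError.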
# -----------------------------------------------------------
# Unsorted
def length_of_indexer(indexer, target=None) -> int:
"""
return the length of a single non-tuple indexer which could be a slice
"""
if target is not None and isinstance(indexer, slice):
target_len = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += target_len
if stop is None or stop > target_len:
stop = target_len
elif stop < 0:
stop += target_len
if step is None:
step = 1
elif step < 0:
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, ABCIndexClass, np.ndarray, list)):
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
| apache-2.0 |
Soncrates/stock-study | bin/newSharpe.py | 1 | 7807 | import logging
import warnings
#warnings.warn("period must be positive", RuntimeWarning)
try:
xrange
except NameError:
xrange = range
import numpy as np
import pandas as pd
from libFinance import HELPER as FINANCE
from libDebug import cpu
'''
Sharpe Ratio
(Portfolio Expected Return - Risk Free Rate) / Portfolio Risk
Expected Return a.k.a Mean
Risk a.k.a Standard Deviation
The Sharpe ratio, also known as the reward-to-variability ratio, is perhaps the most common portfolio management metric.
The excess return of the portfolio over the risk-free rate is standardized by the standard deviation of the portfolio's excess return.
Hypothetically, investors should always be able to invest in government bonds and obtain the risk-free rate of return.
The Sharpe ratio determines the expected realized return over that minimum.
Within the risk-reward framework of portfolio theory, higher risk investments should produce high returns.
As a result, a high Sharpe ratio indicates superior risk-adjusted performance.
'''
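# Worked sketch of the ratio (illustrative numbers only): with an annualized
# expected return of 8%, a risk-free rate of 2% and an annualized risk
# (standard deviation) of 15%,
#     sharpe = (0.08 - 0.02) / 0.15 = 0.4
# which matches what _sharpe() below computes from the daily means and the
# covariance matrix scaled by the period.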
class PORTFOLIO :
columns = ['returns','risk','sharpe']
@classmethod
def validate(cls, data, **kwargs) :
target = "stocks"
stocks = kwargs.get(target,[])
target = "portfolios"
portfolios = kwargs.get(target,25000)
target = "period"
period = kwargs.get(target,252)
target = "risk_free_rate"
risk_free_rate = kwargs.get(target,0.02)
if portfolios < 0 :
logging.warn("portfolios must be positive")
portfolios = 0
if period < 0 :
logging.warn("period must be positive")
period = 0
if risk_free_rate < 0 :
logging.warn("risk_free_rate must be positive")
risk_free_rate = 0
if data is None :
logging.warn('No data!')
return data, stocks, portfolios, risk_free_rate, period
flag = len(stocks) == 0 and isinstance(data, pd.Series)
if flag :
return data, stocks, portfolios, risk_free_rate, period
logging.info(stocks)
logging.info(data)
#ticker_list = data.index.values
ticker_list = data.columns.values
ticker_list = list(ticker_list)
logging.debug(ticker_list)
stocks = filter(lambda x : x in ticker_list, stocks)
if not isinstance(stocks,list) :
stocks = list(stocks)
flag = len(stocks) == 0
if flag :
data = None
else :
#data = data.filter(items=stocks,axis='index')
data = data.filter(items=stocks,axis='columns')
logging.debug(data)
return data, stocks, portfolios, risk_free_rate, period
@classmethod
@cpu
def findWeightedSharpe(cls, data, weights, risk_free_rate=0.02, period=252) :
if not isinstance(data,pd.DataFrame) :
logging.warn("prices are not in a dataframe {}".format(type(data)))
data = pd.DataFrame(data)
#calculate mean daily return and covariance of daily returns
mean = data.mean()
cov_matrix = data.cov()
returns, risk, sharpe = cls._sharpe(cov_matrix, mean, period, risk_free_rate, weights)
ret = dict(zip(['returns', 'risk', 'sharpe'],[returns,risk,sharpe]))
logging.info(ret)
return ret
@classmethod
def _weights(cls, size, num_portfolios) :
low = 0.1
high = low + low + (1/size)
for i in xrange(num_portfolios):
#select random weights for portfolio holdings
weights = np.random.uniform(low=low, high=high, size=size)
weights = np.array(weights)
#rebalance weights to sum to 1
weights /= np.sum(weights)
yield weights, i
@classmethod
def _sharpe(cls, cov_matrix, mean, period, risk_free_rate, weights) :
magic = np.dot(cov_matrix, weights)
magic_number = np.dot(weights.T,magic)
#calculate return and volatility
returns = np.sum(mean * weights) * period
risk = np.sqrt(magic_number) * np.sqrt(period)
#calculate Sharpe Ratio (return - risk free rate / volatility)
sharpe = 0
if risk != 0 :
sharpe = ( returns - risk_free_rate ) / risk
return returns, risk, sharpe
@classmethod
def transformReturns(cls, returns) :
ret = FINANCE.findDailyReturns(returns)
mean = ret.mean()
cov_matrix = ret.cov()
#logging.info(cov_matrix)
#logging.info(ret)
#logging.info(mean)
return ret, mean, cov_matrix
@classmethod
def _find(cls, data, stocks, num_portfolios, risk_free_rate, period) :
#set up array to hold results
#We have increased the size of the array to hold the weight values for each stock
size = len(stocks)
ret = np.zeros((3+size,num_portfolios))
returns, mean, cov_matrix = cls.transformReturns(data)
for weights, i in cls._weights(size, num_portfolios) :
returns, risk, sharpe = cls._sharpe(cov_matrix, mean, period, risk_free_rate, weights)
#store results in results array
ret[0,i] = returns
ret[1,i] = risk
ret[2,i] = sharpe
for j in range(len(weights)):
ret[j+3,i] = weights[j]
#convert results array to Pandas DataFrame
columns = cls.columns + stocks
ret = pd.DataFrame(ret.T,columns=columns)
logging.debug(ret.head(3))
logging.debug(ret.tail(3))
return ret
@classmethod
def find(cls, data, **kwargs) :
data, stocks, num_portfolios, risk_free_rate, period = cls.validate(data, **kwargs)
if data is None :
return pd.DataFrame(), pd.DataFrame()
ret = cls._find(data, stocks, num_portfolios, risk_free_rate, period)
#locate position of portfolio with highest Sharpe Ratio
max_sharpe = ret['sharpe'].idxmax()
max_sharpe_port = ret.iloc[max_sharpe]
#locate positon of portfolio with minimum risk
min_vol = ret['risk'].idxmin()
min_vol_port = ret.iloc[min_vol]
return max_sharpe_port, min_vol_port
if __name__ == "__main__" :
import sys
import logging
from libCommon import INI_READ
from libUtils import ENVIRONMENT
from libFinance import STOCK_TIMESERIES
env = ENVIRONMENT.instance()
log_msg = '%(module)s.%(funcName)s(%(lineno)s) %(levelname)s - %(message)s'
logging.basicConfig(stream=sys.stdout, format=log_msg, level=logging.DEBUG)
def prep(*ini_list) :
ini_list = filter(lambda x : "benchmark" in x , ini_list)
print (ini_list)
for path, section, key, stock_list in INI_READ.read(*ini_list) :
if section == 'Index' : pass
else : continue
yield key, stock_list
file_list = env.list_filenames('local/historical_prices/*pkl')
ini_list = env.list_filenames('local/*.ini')
reader = STOCK_TIMESERIES.init()
for name, stock_list in prep(*ini_list) :
for stock in stock_list :
print ((stock,name))
data = reader.extract_from_yahoo(stock)
if data is None : continue
ret = data[['Adj Close']]
print (ret.head(2))
print (ret.tail(2))
print (ret.mean())
print (ret.std())
print (ret.mean()[0])
print (ret.std()[0])
print (HELPER.find(ret,period=FINANCE.YEAR,span=0))
print (HELPER.find(ret,period=FINANCE.YEAR))
print ((stock,name))
| lgpl-2.1 |
sdh11/gnuradio | gr-dtv/examples/atsc_ctrlport_monitor.py | 7 | 6277 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
import matplotlib
matplotlib.use("QT4Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from gnuradio.ctrlport.GNURadioControlPortClient import (
GNURadioControlPortClient, TTransportException,
)
import numpy
from numpy.fft import fftpack
"""
If a host is running the ATSC receiver chain with ControlPort
turned on, this script will connect to the host using the hostname and
port pair of the ControlPort instance and display metrics of the
receiver. The ATSC publishes information about the success of the
Reed-Solomon decoder and Viterbi metrics for use here in displaying
the link quality. This also gets the equalizer taps of the receiver
and displays the frequency response.
"""
class atsc_ctrlport_monitor(object):
def __init__(self, host, port):
argv = [None, host, port]
radiosys = GNURadioControlPortClient(argv=argv, rpcmethod='thrift')
self.radio = radiosys.client
print(self.radio)
vt_init_key = 'dtv_atsc_viterbi_decoder0::decoder_metrics'
data = self.radio.getKnobs([vt_init_key])[vt_init_key]
init_metric = numpy.mean(data.value)
self._viterbi_metric = 100*[init_metric,]
table_col_labels = ('Num Packets', 'Error Rate', 'Packet Error Rate',
'Viterbi Metric', 'SNR')
self._fig = plt.figure(1, figsize=(12,12), facecolor='w')
self._sp0 = self._fig.add_subplot(4,1,1)
self._sp1 = self._fig.add_subplot(4,1,2)
self._sp2 = self._fig.add_subplot(4,1,3)
self._plot_taps = self._sp0.plot([], [], 'k', linewidth=2)
self._plot_psd = self._sp1.plot([], [], 'k', linewidth=2)
self._plot_data = self._sp2.plot([], [], 'ok', linewidth=2, markersize=4, alpha=0.05)
self._ax2 = self._fig.add_subplot(4,1,4)
self._table = self._ax2.table(cellText=[len(table_col_labels)*['0']],
colLabels=table_col_labels,
loc='center')
self._ax2.axis('off')
cells = self._table.properties()['child_artists']
for c in cells:
c.set_lw(0.1) # set's line width
c.set_ls('solid')
c.set_height(0.2)
ani = animation.FuncAnimation(self._fig, self.update_data, frames=200,
fargs=(self._plot_taps[0], self._plot_psd[0],
self._plot_data[0], self._table),
init_func=self.init_function,
blit=True)
plt.show()
def update_data(self, x, taps, psd, syms, table):
try:
eqdata_key = 'dtv_atsc_equalizer0::taps'
symdata_key = 'dtv_atsc_equalizer0::data'
rs_nump_key = 'dtv_atsc_rs_decoder0::num_packets'
rs_numbp_key = 'dtv_atsc_rs_decoder0::num_bad_packets'
rs_numerrs_key = 'dtv_atsc_rs_decoder0::num_errors_corrected'
vt_metrics_key = 'dtv_atsc_viterbi_decoder0::decoder_metrics'
snr_key = 'probe2_f0::SNR'
data = self.radio.getKnobs([])
eqdata = data[eqdata_key]
symdata = data[symdata_key]
rs_num_packets = data[rs_nump_key]
rs_num_bad_packets = data[rs_numbp_key]
rs_num_errors_corrected = data[rs_numerrs_key]
vt_decoder_metrics = data[vt_metrics_key]
snr_est = data[snr_key]
vt_decoder_metrics = numpy.mean(vt_decoder_metrics.value)
self._viterbi_metric.pop()
self._viterbi_metric.insert(0, vt_decoder_metrics)
except TTransportException:
sys.stderr.write("Lost connection, exiting")
sys.exit(1)
ntaps = len(eqdata.value)
taps.set_ydata(eqdata.value)
taps.set_xdata(list(range(ntaps)))
self._sp0.set_xlim(0, ntaps)
self._sp0.set_ylim(min(eqdata.value), max(eqdata.value))
fs = 6.25e6
freq = numpy.linspace(-fs / 2, fs / 2, 10000)
H = numpy.fft.fftshift(fftpack.fft(eqdata.value, 10000))
HdB = 20.0*numpy.log10(abs(H))
psd.set_ydata(HdB)
psd.set_xdata(freq)
self._sp1.set_xlim(0, fs / 2)
self._sp1.set_ylim([min(HdB), max(HdB)])
self._sp1.set_yticks([min(HdB), max(HdB)])
self._sp1.set_yticklabels(["min", "max"])
nsyms = len(symdata.value)
syms.set_ydata(symdata.value)
syms.set_xdata(nsyms*[0,])
self._sp2.set_xlim([-1, 1])
self._sp2.set_ylim([-10, 10])
per = float(rs_num_bad_packets.value) / float(rs_num_packets.value)
ber = float(rs_num_errors_corrected.value) / float(187*rs_num_packets.value)
table._cells[(1,0)]._text.set_text("{0}".format(rs_num_packets.value))
table._cells[(1,1)]._text.set_text("{0:.2g}".format(ber))
table._cells[(1,2)]._text.set_text("{0:.2g}".format(per))
table._cells[(1,3)]._text.set_text("{0:.1f}".format(numpy.mean(self._viterbi_metric)))
table._cells[(1,4)]._text.set_text("{0:.4f}".format(snr_est.value[0]))
return (taps, psd, syms, table)
def init_function(self):
return self._plot_taps + self._plot_psd + self._plot_data
if __name__ == "__main__":
host = sys.argv[1]
port = sys.argv[2]
m = atsc_ctrlport_monitor(host, port)
| gpl-3.0 |
ozak/geopandas | examples/plotting_basemap_background.py | 1 | 2671 | """
Adding a background map to plots
--------------------------------
This example shows how you can add a background basemap to plots created
with the geopandas ``.plot()`` method. This makes use of the
`contextily <https://github.com/darribas/contextily>`__ package to retrieve
web map tiles from several sources (OpenStreetMap, Stamen).
"""
# sphinx_gallery_thumbnail_number = 3
import geopandas
###############################################################################
# Let's use the NYC borough boundary data that is available in geopandas
# datasets. Plotting this gives the following result:
df = geopandas.read_file(geopandas.datasets.get_path('nybb'))
ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
###############################################################################
# Convert the data to Web Mercator
# ================================
#
# Web map tiles are typically provided in
# `Web Mercator <https://en.wikipedia.org/wiki/Web_Mercator>`__
# (`EPSG 3857 <https://epsg.io/3857>`__), so we need to make sure to convert
# our data first to the same CRS to combine our polygons and background tiles
# in the same map:
df = df.to_crs(epsg=3857)
###############################################################################
# Contextily helper function
# ==========================
#
# We define a small helper function that uses
# `contextily <https://github.com/darribas/contextily>`__ to add a map
# as background to an existing plot:
import contextily as ctx
def add_basemap(ax, zoom, url='http://tile.stamen.com/terrain/tileZ/tileX/tileY.png'):
xmin, xmax, ymin, ymax = ax.axis()
basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax, zoom=zoom, url=url)
ax.imshow(basemap, extent=extent, interpolation='bilinear')
# restore original x/y limits
ax.axis((xmin, xmax, ymin, ymax))
###############################################################################
# Add background tiles to plot
# ============================
#
# Now we can use the above function to easily add a background map to our
# plot. The `zoom` keyword is required and let's you specify the detail of the
# map tiles (be careful to not specify a too high `zoom` level, as this can
# result in a large download):
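# (At zoom level z the world is tiled into roughly 4**z tiles, so each extra
# zoom level about quadruples the number of tiles covering a fixed extent.)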
ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
add_basemap(ax, zoom=10)
###############################################################################
# By default, contextily uses the Stamen Terrain style. We can specify a
# different style using ``ctx.sources``:
ax = df.plot(figsize=(10, 10), alpha=0.5, edgecolor='k')
add_basemap(ax, zoom=11, url=ctx.sources.ST_TONER_LITE)
ax.set_axis_off()
| bsd-3-clause |
imaculate/scikit-learn | examples/linear_model/plot_ridge_coeffs.py | 157 | 2785 | """
==============================================================
Plot Ridge coefficients as a function of the L2 regularization
==============================================================
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color in the left plot represents one different dimension of the
coefficient vector, and this is displayed as a function of the
regularization parameter. The right plot shows how exact the solution
is. This example illustrates how a well defined solution is
found by Ridge regression and how regularization affects the
coefficients and their values. The plot on the right shows how
the difference of the coefficients from the estimator changes
as a function of regularization.
In this example the dependent variable Y is set as a function
of the input features: y = X*w + c. The coefficient vector w is
randomly sampled from a normal distribution, whereas the bias term c is
set to a constant.
As alpha tends toward zero the coefficients found by Ridge
regression stabilize towards the randomly sampled vector w.
For big alpha (strong regularisation) the coefficients
are smaller (eventually converging at 0) leading to a
simpler and biased solution.
These dependencies can be observed on the left plot.
The right plot shows the mean squared error between the
coefficients found by the model and the chosen vector w.
Less regularised models retrieve the exact
coefficients (error is equal to 0), while more strongly regularised
models increase the error.
Please note that in this example the data is non-noisy, hence
it is possible to extract the exact coefficients.
"""
# Author: Kornel Kielczewski -- <[email protected]>
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
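# For reference, ridge regression minimizes ||y - Xw||^2 + alpha * ||w||^2,
# whose closed-form solution is w_hat = (X^T X + alpha * I)^-1 X^T y; hence
# w_hat approaches the ordinary least-squares coefficients as alpha -> 0 and
# shrinks toward 0 as alpha -> infinity, which is what the left plot shows.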
clf = Ridge()
X, y, w = make_regression(n_samples=10, n_features=10, coef=True,
random_state=1, bias=3.5)
coefs = []
errors = []
alphas = np.logspace(-6, 6, 200)
# Train the model with different regularisation strengths
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
errors.append(mean_squared_error(clf.coef_, w))
# Display results
plt.figure(figsize=(20, 6))
plt.subplot(121)
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.subplot(122)
ax = plt.gca()
ax.plot(alphas, errors)
ax.set_xscale('log')
plt.xlabel('alpha')
plt.ylabel('error')
plt.title('Coefficient error as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ocefpaf/iris | docs/iris/example_code/Meteorology/lagged_ensemble.py | 2 | 5965 | """
Seasonal ensemble model plots
=============================
This example demonstrates the loading of a lagged ensemble dataset from the
GloSea4 model, which is then used to produce two types of plot:
* The first shows the "postage stamp" style image with an array of 14 images,
one for each ensemble member with a shared colorbar. (The missing image in
this example represents ensemble member number 6 which was a failed run)
* The second plot shows the data limited to a region of interest, in this case
a region defined for forecasting ENSO (El Nino-Southern Oscillation), which,
for the purposes of this example, has had the ensemble mean subtracted from
each ensemble member to give an anomaly surface temperature. In practice a
better approach would be to take the climatological mean, calibrated to the
model, from each ensemble member.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.plot as iplt
def realization_metadata(cube, field, fname):
"""
A function which modifies the cube's metadata to add a "realization"
(ensemble member) coordinate from the filename if one doesn't already exist
in the cube.
"""
# add an ensemble member coordinate if one doesn't already exist
if not cube.coords("realization"):
# the ensemble member is encoded in the filename as *_???.pp where ???
# is the ensemble member
realization_number = fname[-6:-3]
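        # e.g. for a file named "ensemble_010.pp", fname[-6:-3] is "010"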
import iris.coords
realization_coord = iris.coords.AuxCoord(
np.int32(realization_number), "realization"
)
cube.add_aux_coord(realization_coord)
def main():
# extract surface temperature cubes which have an ensemble member
# coordinate, adding appropriate lagged ensemble metadata
surface_temp = iris.load_cube(
iris.sample_data_path("GloSea4", "ensemble_???.pp"),
iris.Constraint("surface_temperature", realization=lambda value: True),
callback=realization_metadata,
)
# -------------------------------------------------------------------------
# Plot #1: Ensemble postage stamps
# -------------------------------------------------------------------------
# for the purposes of this example, take the last time element of the cube
last_timestep = surface_temp[:, -1, :, :]
# Make 50 evenly spaced levels which span the dataset
contour_levels = np.linspace(
np.min(last_timestep.data), np.max(last_timestep.data), 50
)
# Create a wider than normal figure to support our many plots
plt.figure(figsize=(12, 6), dpi=100)
# Also manually adjust the spacings which are used when creating subplots
plt.gcf().subplots_adjust(
hspace=0.05,
wspace=0.05,
top=0.95,
bottom=0.05,
left=0.075,
right=0.925,
)
# iterate over all possible latitude longitude slices
for cube in last_timestep.slices(["latitude", "longitude"]):
# get the ensemble member number from the ensemble coordinate
ens_member = cube.coord("realization").points[0]
# plot the data in a 4x4 grid, with each plot's position in the grid
        # being determined by the ensemble member number; the special case for the
        # 13th ensemble member is to place its plot at the bottom right
if ens_member == 13:
plt.subplot(4, 4, 16)
else:
plt.subplot(4, 4, ens_member + 1)
cf = iplt.contourf(cube, contour_levels)
# add coastlines
plt.gca().coastlines()
# make an axes to put the shared colorbar in
colorbar_axes = plt.gcf().add_axes([0.35, 0.1, 0.3, 0.05])
colorbar = plt.colorbar(cf, colorbar_axes, orientation="horizontal")
colorbar.set_label("%s" % last_timestep.units)
# limit the colorbar to 8 tick marks
import matplotlib.ticker
colorbar.locator = matplotlib.ticker.MaxNLocator(8)
colorbar.update_ticks()
# get the time for the entire plot
time_coord = last_timestep.coord("time")
time = time_coord.units.num2date(time_coord.bounds[0, 0])
    # set a global title for the postage stamps, with the date formatted as
# "monthname year"
plt.suptitle(
"Surface temperature ensemble forecasts for %s"
% (time.strftime("%B %Y"),)
)
iplt.show()
# -------------------------------------------------------------------------
# Plot #2: ENSO plumes
# -------------------------------------------------------------------------
# Nino 3.4 lies between: 170W and 120W, 5N and 5S, so define a constraint
# which matches this
nino_3_4_constraint = iris.Constraint(
longitude=lambda v: -170 + 360 <= v <= -120 + 360,
latitude=lambda v: -5 <= v <= 5,
)
nino_cube = surface_temp.extract(nino_3_4_constraint)
# Subsetting a circular longitude coordinate always results in a circular
# coordinate, so set the coordinate to be non-circular
nino_cube.coord("longitude").circular = False
# Calculate the horizontal mean for the nino region
mean = nino_cube.collapsed(["latitude", "longitude"], iris.analysis.MEAN)
# Calculate the ensemble mean of the horizontal mean. To do this, remove
# the "forecast_period" and "forecast_reference_time" coordinates which
# span both "relalization" and "time".
mean.remove_coord("forecast_reference_time")
mean.remove_coord("forecast_period")
ensemble_mean = mean.collapsed("realization", iris.analysis.MEAN)
# take the ensemble mean from each ensemble member
mean -= ensemble_mean.data
plt.figure()
for ensemble_member in mean.slices(["time"]):
# draw each ensemble member as a dashed line in black
iplt.plot(ensemble_member, "--k")
plt.title("Mean temperature anomaly for ENSO 3.4 region")
plt.xlabel("Time")
plt.ylabel("Temperature anomaly / K")
iplt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
IshankGulati/scikit-learn | examples/linear_model/plot_logistic.py | 73 | 1568 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
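# model() is the logistic (sigmoid) function: model(0) = 0.5 and it tends to
# 1 as x -> +inf and to 0 as x -> -inf, mapping the linear score onto [0, 1].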
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='red', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(range(-5, 10))
plt.yticks([0, 0.5, 1])
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.legend(('Logistic Regression Model', 'Linear Regression Model'),
loc="lower right", fontsize='small')
plt.show()
| bsd-3-clause |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/plotly/matplotlylib/mplexporter/renderers/base.py | 44 | 14355 | import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
return bool(ax and ax.xaxis._gridOnMajor and ax.yaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
        Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
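# ----------------------------------------------------------------------------
# Editor's note: the sketch below is an illustration added by the editor, not
# part of the original module. It shows how a concrete renderer might implement
# the abstract hooks above; the base-class name ``Renderer`` is an assumption
# (it stands for the abstract renderer class defined above), and only
# ``draw_path``, ``draw_text`` and ``draw_image`` strictly need to be
# overridden, since ``draw_line``, ``draw_markers`` and
# ``draw_path_collection`` fall back to ``draw_path`` by default.
#
#   class ListingRenderer(Renderer):
#       """Collect a summary of every drawn element instead of rendering it."""
#       def __init__(self):
#           self.calls = []
#
#       def draw_path(self, data, coordinates, pathcodes, style,
#                     offset=None, offset_coordinates="data", mplobj=None):
#           # record the coordinate system and path length
#           self.calls.append(("path", coordinates, len(pathcodes)))
#
#       def draw_text(self, text, position, coordinates, style,
#                     text_type=None, mplobj=None):
#           self.calls.append(("text", coordinates, text))
#
#       def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
#           self.calls.append(("image", coordinates, extent))
# ----------------------------------------------------------------------------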
| apache-2.0 |
raghavrv/scikit-learn | examples/plot_compare_reduction.py | 45 | 4959 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=================================================================
Selecting dimensionality reduction with Pipeline and GridSearchCV
=================================================================
This example constructs a pipeline that does dimensionality
reduction followed by prediction with a support vector
classifier. It demonstrates the use of ``GridSearchCV`` and
``Pipeline`` to optimize over different classes of estimators in a
single CV run -- unsupervised ``PCA`` and ``NMF`` dimensionality
reductions are compared to univariate feature selection during
the grid search.
Additionally, ``Pipeline`` can be instantiated with the ``memory``
argument to memoize the transformers within the pipeline, avoiding fitting
the same transformers over and over again.
Note that the use of ``memory`` to enable caching becomes interesting when the
fitting of a transformer is costly.
"""
###############################################################################
# Illustration of ``Pipeline`` and ``GridSearchCV``
###############################################################################
# This section illustrates the use of a ``Pipeline`` with
# ``GridSearchCV``
# Authors: Robert McGibbon, Joel Nothman, Guillaume Lemaitre
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
print(__doc__)
pipe = Pipeline([
('reduce_dim', PCA()),
('classify', LinearSVC())
])
N_FEATURES_OPTIONS = [2, 4, 8]
C_OPTIONS = [1, 10, 100, 1000]
param_grid = [
{
'reduce_dim': [PCA(iterated_power=7), NMF()],
'reduce_dim__n_components': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
},
{
'reduce_dim': [SelectKBest(chi2)],
'reduce_dim__k': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
},
]
reducer_labels = ['PCA', 'NMF', 'KBest(chi2)']
grid = GridSearchCV(pipe, cv=3, n_jobs=1, param_grid=param_grid)
digits = load_digits()
grid.fit(digits.data, digits.target)
mean_scores = np.array(grid.cv_results_['mean_test_score'])
# scores are in the order of param_grid iteration, which is alphabetical
mean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS))
# select score for best C
mean_scores = mean_scores.max(axis=0)
bar_offsets = (np.arange(len(N_FEATURES_OPTIONS)) *
(len(reducer_labels) + 1) + .5)
plt.figure()
COLORS = 'bgrcmyk'
for i, (label, reducer_scores) in enumerate(zip(reducer_labels, mean_scores)):
plt.bar(bar_offsets + i, reducer_scores, label=label, color=COLORS[i])
plt.title("Comparing feature reduction techniques")
plt.xlabel('Reduced number of features')
plt.xticks(bar_offsets + len(reducer_labels) / 2, N_FEATURES_OPTIONS)
plt.ylabel('Digit classification accuracy')
plt.ylim((0, 1))
plt.legend(loc='upper left')
###############################################################################
# Caching transformers within a ``Pipeline``
###############################################################################
# It is sometimes worthwhile storing the state of a specific transformer
# since it could be used again. Using a pipeline in ``GridSearchCV`` triggers
# such situations. Therefore, we use the argument ``memory`` to enable caching.
#
# .. warning::
# Note that this example is, however, only an illustration since for this
# specific case fitting PCA is not necessarily slower than loading the
# cache. Hence, use the ``memory`` constructor parameter when the fitting
# of a transformer is costly.
from tempfile import mkdtemp
from shutil import rmtree
from sklearn.externals.joblib import Memory
# Create a temporary folder to store the transformers of the pipeline
cachedir = mkdtemp()
memory = Memory(cachedir=cachedir, verbose=10)
cached_pipe = Pipeline([('reduce_dim', PCA()),
('classify', LinearSVC())],
memory=memory)
# This time, a cached pipeline will be used within the grid search
grid = GridSearchCV(cached_pipe, cv=3, n_jobs=1, param_grid=param_grid)
digits = load_digits()
grid.fit(digits.data, digits.target)
# Delete the temporary cache before exiting
rmtree(cachedir)
###############################################################################
# The ``PCA`` fitting is only computed at the evaluation of the first
# configuration of the ``C`` parameter of the ``LinearSVC`` classifier. The
# other configurations of ``C`` will trigger the loading of the cached ``PCA``
# estimator data, which saves processing time. Therefore, caching the
# pipeline with ``memory`` is highly beneficial when fitting
# a transformer is costly.
plt.show()
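###############################################################################
# Editor's note (assumption added by the editor, not verified against this
# exact scikit-learn version): in more recent releases the ``memory`` argument
# of ``Pipeline`` also accepts a plain directory path string, so the explicit
# ``Memory`` object above could be replaced by, e.g.::
#
#     cached_pipe = Pipeline([('reduce_dim', PCA()),
#                             ('classify', LinearSVC())],
#                            memory=mkdtemp())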
| bsd-3-clause |
cwu2011/scikit-learn | examples/manifold/plot_manifold_sphere.py | 258 | 5101 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space; unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem matches fairly
well that of representing a flat map of the Earth, as with a
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
             % (n_samples, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
# Perform t-distributed stochastic neighbor embedding.
t0 = time()
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
trans_data = tsne.fit_transform(sphere_data).T
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
liyu1990/sklearn | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich get richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods is strongly
    # super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
ClimbsRocks/scikit-learn | examples/neighbors/plot_classification.py | 58 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
davidgbe/scikit-learn | examples/preprocessing/plot_robust_scaling.py | 221 | 2702 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)  # transform only: the scaler must not be refit on test data
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
wschenck/nest-simulator | pynest/examples/plot_weight_matrices.py | 9 | 6702 | # -*- coding: utf-8 -*-
#
# plot_weight_matrices.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Plot weight matrices example
----------------------------
This example demonstrates how to extract the connection strength
for all the synapses between two populations of neurons and gather
these values in weight matrices for further analysis and visualization.
All connection types between these populations are considered, i.e.,
four weight matrices are created and plotted.
"""
###############################################################################
# First, we import all necessary modules to extract, handle and plot
# the connectivity matrices
import numpy as np
import matplotlib.pyplot as plt
import nest
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
###############################################################################
# We now specify a function to extract and plot weight matrices for all
# connections among `E_neurons` and `I_neurons`.
#
# We initialize all the matrices, whose dimensionality is determined by the
# number of elements in each population.
# Since in this example, we have 2 populations (E/I), :math:`2^2` possible
# synaptic connections exist (EE, EI, IE, II).
def plot_weight_matrices(E_neurons, I_neurons):
W_EE = np.zeros([len(E_neurons), len(E_neurons)])
W_EI = np.zeros([len(I_neurons), len(E_neurons)])
W_IE = np.zeros([len(E_neurons), len(I_neurons)])
W_II = np.zeros([len(I_neurons), len(I_neurons)])
a_EE = nest.GetConnections(E_neurons, E_neurons)
'''
Using `get`, we can extract the value of the connection weight,
for all the connections between these populations
'''
c_EE = a_EE.weight
'''
Repeat the two previous steps for all other connection types
'''
a_EI = nest.GetConnections(I_neurons, E_neurons)
c_EI = a_EI.weight
a_IE = nest.GetConnections(E_neurons, I_neurons)
c_IE = a_IE.weight
a_II = nest.GetConnections(I_neurons, I_neurons)
c_II = a_II.weight
'''
We now iterate through the range of all connections of each type.
To populate the corresponding weight matrix, we begin by identifying
the source-node_id (by using .source) and the target-node_id.
For each node_id, we subtract the minimum node_id within the corresponding
population, to assure the matrix indices range from 0 to the size of
the population.
After determining the matrix indices [i, j], for each connection
object, the corresponding weight is added to the entry W[i,j].
The procedure is then repeated for all the different connection types.
'''
a_EE_src = a_EE.source
a_EE_trg = a_EE.target
a_EI_src = a_EI.source
a_EI_trg = a_EI.target
a_IE_src = a_IE.source
a_IE_trg = a_IE.target
a_II_src = a_II.source
a_II_trg = a_II.target
for idx in range(len(a_EE)):
W_EE[a_EE_src[idx] - min(E_neurons),
a_EE_trg[idx] - min(E_neurons)] += c_EE[idx]
for idx in range(len(a_EI)):
W_EI[a_EI_src[idx] - min(I_neurons),
a_EI_trg[idx] - min(E_neurons)] += c_EI[idx]
for idx in range(len(a_IE)):
W_IE[a_IE_src[idx] - min(E_neurons),
a_IE_trg[idx] - min(I_neurons)] += c_IE[idx]
for idx in range(len(a_II)):
W_II[a_II_src[idx] - min(I_neurons),
a_II_trg[idx] - min(I_neurons)] += c_II[idx]
fig = plt.figure()
    fig.suptitle('Weight matrices', fontsize=14)
gs = gridspec.GridSpec(4, 4)
ax1 = plt.subplot(gs[:-1, :-1])
ax2 = plt.subplot(gs[:-1, -1])
ax3 = plt.subplot(gs[-1, :-1])
ax4 = plt.subplot(gs[-1, -1])
plt1 = ax1.imshow(W_EE, cmap='jet')
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(plt1, cax=cax)
ax1.set_title('W_{EE}')
plt.tight_layout()
plt2 = ax2.imshow(W_IE)
plt2.set_cmap('jet')
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(plt2, cax=cax)
ax2.set_title('W_{EI}')
plt.tight_layout()
plt3 = ax3.imshow(W_EI)
plt3.set_cmap('jet')
divider = make_axes_locatable(ax3)
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(plt3, cax=cax)
ax3.set_title('W_{IE}')
plt.tight_layout()
plt4 = ax4.imshow(W_II)
plt4.set_cmap('jet')
divider = make_axes_locatable(ax4)
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(plt4, cax=cax)
ax4.set_title('W_{II}')
plt.tight_layout()
#################################################################################
# The script iterates through the list of all connections of each type.
# To populate the corresponding weight matrix, we identify the source-node_id
# (via the ``.source`` attribute of each connection) and the target-node_id
# (via the ``.target`` attribute).
# For each `node_id`, we subtract the minimum `node_id` within the corresponding
# population, to assure the matrix indices range from 0 to the size of the
# population.
#
# After determining the matrix indices `[i, j]`, for each connection object, the
# corresponding weight is added to the entry `W[i,j]`. The procedure is then
# repeated for all the different connection types.
#
# We then plot the figure, specifying the properties we want. For example, we
# can display all the weight matrices in a single figure, which requires us to
# use ``GridSpec`` to specify the spatial arrangement of the axes.
# A subplot is subsequently created for each connection type. Using ``imshow``,
# we can visualize the weight matrix in the corresponding axis. We can also
# specify the colormap for this image.
# Using the ``axis_divider`` module from ``mpl_toolkits``, we can allocate a small
# extra space on the right of the current axis, which we reserve for a
# colorbar.
# We can set the title of each axis and adjust the axis subplot parameters.
# Finally, the last three steps are repeated for each synapse type.
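#
# Editor's note: a minimal, hypothetical usage sketch (added by the editor, not
# part of the original example). It assumes two populations have been created
# and connected elsewhere; the model name, sizes and weights below are
# illustrative only:
#
#   E_neurons = nest.Create('iaf_psc_alpha', 80)
#   I_neurons = nest.Create('iaf_psc_alpha', 20)
#   nest.Connect(E_neurons, I_neurons, syn_spec={'weight': 2.0})
#   nest.Connect(I_neurons, E_neurons, syn_spec={'weight': -5.0})
#   plot_weight_matrices(E_neurons, I_neurons)
#   plt.show()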
| gpl-2.0 |
CompPhysics/MachineLearning | doc/src/week43/programs/ode.py | 1 | 4240 | import autograd.numpy as np
from autograd import grad, elementwise_grad
import autograd.numpy.random as npr
from matplotlib import pyplot as plt
def sigmoid(z):
return 1/(1 + np.exp(-z))
# Assuming one input, hidden, and output layer
def neural_network(params, x):
    # Find the weights (including the biases) for the hidden and output layer.
# Assume that params is a list of parameters for each layer.
# The biases are the first element for each array in params,
    # and the weights are the remaining elements in each array in params.
w_hidden = params[0]
w_output = params[1]
    # Assumes the input x is a one-dimensional array
num_values = np.size(x)
x = x.reshape(-1, num_values)
# Assume that the input layer does nothing to the input x
x_input = x
## Hidden layer:
# Add a row of ones to include bias
x_input = np.concatenate((np.ones((1,num_values)), x_input ), axis = 0)
z_hidden = np.matmul(w_hidden, x_input)
x_hidden = sigmoid(z_hidden)
## Output layer:
# Include bias:
x_hidden = np.concatenate((np.ones((1,num_values)), x_hidden ), axis = 0)
z_output = np.matmul(w_output, x_hidden)
x_output = z_output
return x_output
# The trial solution using the deep neural network:
def g_trial(x,params, g0 = 10):
return g0 + x*neural_network(params,x)
# The right side of the ODE:
def g(x, g_trial, gamma = 2):
return -gamma*g_trial
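# Editor's note: for reference, the ODE solved in this script is
#     dg/dx = -gamma * g(x),  g(0) = g0,
# whose analytic solution g(x) = g0 * exp(-gamma * x) is implemented below in
# g_analytic(). The trial function g_trial(x) = g0 + x * N(x) satisfies the
# initial condition by construction for any network output N(x).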
# The cost function:
def cost_function(P, x):
# Evaluate the trial function with the current parameters P
g_t = g_trial(x,P)
# Find the derivative w.r.t x of the neural network
d_net_out = elementwise_grad(neural_network,1)(P,x)
# Find the derivative w.r.t x of the trial function
d_g_t = elementwise_grad(g_trial,0)(x,P)
# The right side of the ODE
func = g(x, g_t)
err_sqr = (d_g_t - func)**2
cost_sum = np.sum(err_sqr)
return cost_sum / np.size(err_sqr)
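# Editor's note: in equation form, the quantity minimized above is
#     C(P) = (1/N) * sum_i ( d g_trial(x_i)/dx + gamma * g_trial(x_i) )^2,
# i.e. the mean squared residual of the ODE evaluated at the N points in x.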
# Solve the exponential decay ODE using neural network with one input, hidden, and output layer
def solve_ode_neural_network(x, num_neurons_hidden, num_iter, lmb):
## Set up initial weights and biases
# For the hidden layer
p0 = npr.randn(num_neurons_hidden, 2 )
# For the output layer
p1 = npr.randn(1, num_neurons_hidden + 1 ) # +1 since bias is included
P = [p0, p1]
print('Initial cost: %g'%cost_function(P, x))
## Start finding the optimal weights using gradient descent
# Find the Python function that represents the gradient of the cost function
# w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
cost_function_grad = grad(cost_function,0)
# Let the update be done num_iter times
for i in range(num_iter):
# Evaluate the gradient at the current weights and biases in P.
# The cost_grad consist now of two arrays;
# one for the gradient w.r.t P_hidden and
# one for the gradient w.r.t P_output
cost_grad = cost_function_grad(P, x)
P[0] = P[0] - lmb * cost_grad[0]
P[1] = P[1] - lmb * cost_grad[1]
print('Final cost: %g'%cost_function(P, x))
return P
def g_analytic(x, gamma = 2, g0 = 10):
return g0*np.exp(-gamma*x)
# Solve the given problem
if __name__ == '__main__':
# Set seed such that the weight are initialized
# with same weights and biases for every run.
npr.seed(15)
## Decide the vales of arguments to the function to solve
N = 10
x = np.linspace(0, 1, N)
## Set up the initial parameters
num_hidden_neurons = 10
num_iter = 10000
lmb = 0.001
# Use the network
P = solve_ode_neural_network(x, num_hidden_neurons, num_iter, lmb)
# Print the deviation from the trial solution and true solution
res = g_trial(x,P)
res_analytical = g_analytic(x)
print('Max absolute difference: %g'%np.max(np.abs(res - res_analytical)))
# Plot the results
plt.figure(figsize=(10,10))
plt.title('Performance of neural network solving an ODE compared to the analytical solution')
plt.plot(x, res_analytical)
plt.plot(x, res[0,:])
plt.legend(['analytical','nn'])
plt.xlabel('x')
plt.ylabel('g(x)')
plt.show()
| cc0-1.0 |
bentzinir/Buffe | Applications/mgail/environments/halfcheetah/forward_model_vanilla.py | 3 | 4007 | import tensorflow as tf
import numpy as np
import common
import matplotlib.pyplot as plt
from collections import OrderedDict
class ForwardModel(object):
def __init__(self, state_size, action_size, rho=0.05, beta=0.3, encoding_size=50, batch_size=50, multi_layered_encoder=True, num_steps=1,
separate_encoders=True, merger=tf.mul, activation=tf.sigmoid, dropout_keep=0.5, lstm=False):
self.state_size = state_size
self.action_size = action_size
self.multi_layered_encoder = multi_layered_encoder
self.separate_encoders = separate_encoders
self.merger = merger
self.num_steps = num_steps
self.activation = activation
self.dropout_keep = dropout_keep
self.lstm = lstm
self.arch_params = {
'input_dim': lstm and (state_size + action_size) or (state_size + action_size),
'encoding_dim': encoding_size,
'small_encoding_dim': 5,
'output_dim': state_size
}
self.sparsity_params = {
'rho': tf.constant(rho),
'beta': tf.constant(beta)
}
self.training_params = {
'lr': 1e-2,
'batch_size': batch_size
}
# set all the necessary weights and biases according to the forward model structure
self.weights = OrderedDict()
self.weights.update(self.linear_variables(state_size + action_size, self.arch_params['encoding_dim'], 'encoder1'))
self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['encoding_dim'], 'encoder2'))
self.weights.update(self.linear_variables(self.arch_params['encoding_dim'], self.arch_params['output_dim'], 'encoder3'))
self.states_normalizer = []
self.actions_normalizer = []
self.states_min = []
def linear_variables(self, input_size, output_size, name):
weights = OrderedDict()
self.weights[name+'_weights'] = self.weight_variable([input_size, output_size])
self.weights[name+'_biases'] = self.weight_variable([1, output_size])
return weights
def weight_variable(self, shape):
initial = tf.random_normal(shape, stddev=0.01, dtype=tf.float32)
return tf.Variable(initial)
def encode(self, input):
state = tf.cast(input[0], tf.float32)
action = tf.cast(input[1], tf.float32)
gru_state = tf.cast(input[2], tf.float32)
concat = tf.concat(concat_dim=1, values=[state, action], name='input')
h0 = tf.nn.relu(tf.matmul(concat, self.weights["encoder1_weights"]) + self.weights["encoder1_biases"])
h1 = tf.nn.relu(tf.matmul(h0, self.weights["encoder2_weights"]) + self.weights["encoder2_biases"])
h1_do = tf.nn.dropout(h1, self.dropout_keep)
delta = tf.matmul(h1_do, self.weights["encoder3_weights"]) + self.weights["encoder3_biases"]
previous_state = tf.stop_gradient(state)
output = previous_state + delta
return output, gru_state
def forward(self, input):
print('Forward Model Vanilla')
# run a forward pass
output, gru_state = self.encode(input)
sparsity_loss = []
return output, sparsity_loss, gru_state
def backward(self, loss):
# create an optimizer
opt = tf.train.AdamOptimizer(learning_rate=self.training_params['lr'])
# compute the gradients for a list of variables
grads_and_vars = opt.compute_gradients(loss=loss, var_list=self.weights.values())
mean_abs_grad, mean_abs_w = common.compute_mean_abs_norm(grads_and_vars)
# apply the gradient
apply_grads = opt.apply_gradients(grads_and_vars)
return apply_grads, mean_abs_grad, mean_abs_w
def train(self, objective):
self.loss = objective
self.minimize, self.mean_abs_grad, self.mean_abs_w = self.backward(self.loss)
self.loss_summary = tf.scalar_summary('loss_t', objective)
| mit |
msultan/msmbuilder | msmbuilder/utils/subsampler.py | 12 | 1413 | from __future__ import print_function, division, absolute_import
from sklearn.base import TransformerMixin
from ..base import BaseEstimator
__all__ = ['Subsampler']
class Subsampler(BaseEstimator, TransformerMixin):
"""Convert a list of feature time series (`X_all`) into a `lag_time`
subsampled time series.
Parameters
----------
lag_time : int
The lag time to subsample by
sliding_window : bool, default=True
If True, each time series is transformed into `lag_time` interlaced
sliding-window (not statistically independent) sequences. If
False, each time series is transformed into a single subsampled
time series.
"""
def __init__(self, lag_time, sliding_window=True):
self._lag_time = lag_time
self._sliding_window = sliding_window
def fit(self, X_all, y=None):
return self
def transform(self, X_all, y=None):
"""Subsample several time series.
Parameters
----------
X_all : list(np.ndarray)
List of feature time series
Returns
-------
features : list(np.ndarray), length = len(X_all)
The subsampled trajectories.
"""
if self._sliding_window:
return [X[k::self._lag_time] for k in range(self._lag_time) for X in X_all]
else:
return [X[::self._lag_time] for X in X_all]
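# Editor's note: a small illustrative sketch (added by the editor, not part of
# the original module) showing how the two modes differ for one 6-frame
# trajectory and lag_time=2:
#
#   import numpy as np
#   X_all = [np.arange(6).reshape(-1, 1)]
#   Subsampler(lag_time=2).transform(X_all)
#   # -> [array([[0], [2], [4]]), array([[1], [3], [5]])]  (interlaced windows)
#   Subsampler(lag_time=2, sliding_window=False).transform(X_all)
#   # -> [array([[0], [2], [4]])]  (a single subsampled series)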
| lgpl-2.1 |
google-research/federated | analytics/location_heatmaps/metrics.py | 1 | 5325 | # Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to get metrics.
This module computes F1, MSE, L1, L2 metrics for the image by transforming
the result to be comparable with the original image.
"""
import dataclasses
import numpy as np
from scipy import stats
from sklearn import metrics as mt
@dataclasses.dataclass
class Metrics:
"""Stores obtained metrics.
Attributes:
mse: mean squared error.
l1_distance: L1 distance.
l2_distance: L2 distance.
wasserstein: Wasserstein distance (e.g. earth's movers distance).
hotspots_count: count of the current hotspots.
f1: f1 score on the discovered hot spots.
mutual_info: mutual information metric.
"""
mse: float
l1_distance: float
l2_distance: float
wasserstein: float
hotspots_count: int
f1: float
mutual_info: float
def rescale_image(image: np.ndarray, total_size: int):
"""Scale up the image to a certain size.
  Naive scaling method for a provided square image: performs scaling such
  that the final image has total_size x total_size pixels. This
  method simply duplicates values into the larger pixels.
Args:
image: initial 'unscaled' square-size image (np.array)
total_size: desired dimension, power of 2, divisible by the image size.
Returns:
scaled image array of size total_size x total_size.
"""
if total_size % image.shape[0] != 0:
raise ValueError('Provided scale size has to be divisible by image size.')
if image.shape[0] != image.shape[1]:
raise ValueError('Provided image needs to have a squared size.')
scale = int(total_size / image.shape[0])
new_image = np.zeros([total_size, total_size])
for i in range(scale):
for j in range(scale):
new_image[i::scale, j::scale] = image
return new_image
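# Editor's note: illustrative sketch (added by the editor, not part of the
# original module) of what rescale_image does for a 2x2 image scaled to 4x4;
# every source pixel is replicated into a 2x2 block:
#
#   rescale_image(np.array([[1., 2.], [3., 4.]]), 4)
#   # -> [[1., 1., 2., 2.],
#   #     [1., 1., 2., 2.],
#   #     [3., 3., 4., 4.],
#   #     [3., 3., 4., 4.]]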
def normalize(vector: np.ndarray):
"""Normalizes the np.array to sum up to one and clips negative values to 0."""
arr = np.copy(vector)
arr[arr < 0] = 0
arr = arr / np.sum(arr)
return arr
def largest_indices(array: np.ndarray, top_k: int):
"""Compute top-k coordinates of the provided array.
Takes an image as np.array, computes indices of the largest elements, and
returns the list of the coordinates and an image with the largest elements
having value 1 and the rest of the image is 0.
Args:
array: data array
top_k: number of elements to select
Returns:
list of top k coordinates, zero array except top-k coordinates set to 1.
"""
flat = array.flatten()
# find the top-k elements (unsorted) in the flattened array
indices = np.argpartition(flat, -top_k)[-top_k:]
# unravel the flattened indices into the image shape
unraveled = np.unravel_index(indices, array.shape)
# create a set of coordinates with top-k elements and create an image.
tuples = set()
top_k_arr = np.zeros_like(array)
for i in range(top_k):
x_coord = unraveled[0][i]
y_coord = unraveled[1][i]
tuples.add((x_coord, y_coord))
top_k_arr[x_coord, y_coord] = 1
return tuples, top_k_arr
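# Editor's note: illustrative sketch (added by the editor, not part of the
# original module):
#
#   largest_indices(np.array([[0.1, 0.7], [0.2, 0.9]]), top_k=2)
#   # -> ({(0, 1), (1, 1)}, array([[0., 1.], [0., 1.]]))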
def get_metrics(test_image, true_image, top_k, total_size):
"""Computes multiple different metrics between two images.
We compute a variety of metrics on the input image: we output L1 and L2
distances, Wasserstein (earth movers) distance, hotspot count and f1 score for
the provided TOP-K parameter, and an MSE error. For the correct comparison the
  images are scaled to the same size first, and then compared per coordinate.
Args:
test_image: obtained image to obtain the metrics
true_image: original image to compare against the test_image.
top_k: parameter to compute top-k hot spots.
total_size: the size to scale the images to.
Returns:
l2 dist, hot spot counts, movers distance, f1-score, l1 dist, mutual info,
MSE.
"""
# normalize the input images
test_image = normalize(rescale_image(test_image, total_size))
true_image = normalize(rescale_image(true_image, total_size))
top_k_test, top_k_test_arr = largest_indices(test_image, top_k)
top_k_true, top_k_true_arr = largest_indices(true_image, top_k)
l1_distance = np.linalg.norm(true_image - test_image, ord=1)
l2_distance = np.linalg.norm(true_image - test_image, ord=2)
mse = mt.mean_squared_error(test_image, true_image)
top_k_diff = len(top_k_true.intersection(top_k_test))
wasserstein = stats.wasserstein_distance(
test_image.reshape(-1), true_image.reshape(-1))
f1 = mt.f1_score(top_k_true_arr.reshape(-1), top_k_test_arr.reshape(-1))
mutual = mt.mutual_info_score(true_image.reshape(-1), test_image.reshape(-1))
metrics = Metrics(l1_distance=l1_distance, l2_distance=l2_distance,
mse=mse, f1=f1, wasserstein=wasserstein,
hotspots_count=top_k_diff, mutual_info=mutual)
return metrics
| apache-2.0 |
murali-munna/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
Clyde-fare/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
CallaJun/hackprince | indico/mpl_toolkits/mplot3d/art3d.py | 8 | 23462 | #!/usr/bin/python
# art3d.py, original mplot3d version by John Porter
# Parts rewritten by Reinier Heeres <[email protected]>
# Minor additions by Ben Axelrod <[email protected]>
'''
Module containing 3D artist code and functions to convert 2D
artists into 3D versions which can be added to an Axes3D.
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from matplotlib import lines, text as mtext, path as mpath, colors as mcolors
from matplotlib import artist
from matplotlib.collections import Collection, LineCollection, \
PolyCollection, PatchCollection, PathCollection
from matplotlib.cm import ScalarMappable
from matplotlib.patches import Patch
from matplotlib.colors import Normalize
from matplotlib.cbook import iterable
import warnings
import numpy as np
import math
from . import proj3d
def norm_angle(a):
"""Return angle between -180 and +180"""
a = (a + 360) % 360
if a > 180:
a = a - 360
return a
def norm_text_angle(a):
"""Return angle between -90 and +90"""
a = (a + 180) % 180
if a > 90:
a = a - 180
return a
def get_dir_vector(zdir):
if zdir == 'x':
return np.array((1, 0, 0))
elif zdir == 'y':
return np.array((0, 1, 0))
elif zdir == 'z':
return np.array((0, 0, 1))
elif zdir is None:
return np.array((0, 0, 0))
elif iterable(zdir) and len(zdir) == 3:
return zdir
else:
raise ValueError("'x', 'y', 'z', None or vector of length 3 expected")
class Text3D(mtext.Text):
'''
Text object with 3D position and (in the future) direction.
'''
def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):
'''
*x*, *y*, *z* Position of text
*text* Text string to display
*zdir* Direction of text
Keyword arguments are passed onto :func:`~matplotlib.text.Text`.
'''
mtext.Text.__init__(self, x, y, text, **kwargs)
self.set_3d_properties(z, zdir)
def set_3d_properties(self, z=0, zdir='z'):
x, y = self.get_position()
self._position3d = np.array((x, y, z))
self._dir_vec = get_dir_vector(zdir)
def draw(self, renderer):
proj = proj3d.proj_trans_points([self._position3d, \
self._position3d + self._dir_vec], renderer.M)
dx = proj[0][1] - proj[0][0]
dy = proj[1][1] - proj[1][0]
if dx==0. and dy==0.:
# atan2 raises ValueError: math domain error on 0,0
angle = 0.
else:
angle = math.degrees(math.atan2(dy, dx))
self.set_position((proj[0][0], proj[1][0]))
self.set_rotation(norm_text_angle(angle))
mtext.Text.draw(self, renderer)
def text_2d_to_3d(obj, z=0, zdir='z'):
"""Convert a Text to a Text3D object."""
obj.__class__ = Text3D
obj.set_3d_properties(z, zdir)
class Line3D(lines.Line2D):
'''
3D line object.
'''
def __init__(self, xs, ys, zs, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.
'''
lines.Line2D.__init__(self, [], [], *args, **kwargs)
self._verts3d = xs, ys, zs
def set_3d_properties(self, zs=0, zdir='z'):
xs = self.get_xdata()
ys = self.get_ydata()
try:
# If *zs* is a list or array, then this will fail and
# just proceed to juggle_axes().
zs = float(zs)
zs = [zs for x in xs]
except TypeError:
pass
self._verts3d = juggle_axes(xs, ys, zs, zdir)
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_data(xs, ys)
lines.Line2D.draw(self, renderer)
def line_2d_to_3d(line, zs=0, zdir='z'):
'''
Convert a 2D line to 3D.
'''
line.__class__ = Line3D
line.set_3d_properties(zs, zdir)
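# Editor's note: illustrative sketch (added by the editor, not part of the
# original module); this mirrors what Axes3D.plot effectively does when it
# promotes an ordinary 2D line to 3D (the exact call sequence is an assumption
# for illustration):
#
#   import matplotlib.pyplot as plt
#   from matplotlib.lines import Line2D
#   from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection
#
#   fig = plt.figure()
#   ax = fig.add_subplot(111, projection='3d')
#   line = Line2D([0, 1, 2], [0, 1, 4])
#   ax.add_line(line)
#   line_2d_to_3d(line, zs=[0, 1, 2], zdir='z')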
def path_to_3d_segment(path, zs=0, zdir='z'):
'''Convert a path to a 3D segment.'''
if not iterable(zs):
zs = np.ones(len(path)) * zs
seg = []
pathsegs = path.iter_segments(simplify=False, curves=False)
for (((x, y), code), z) in zip(pathsegs, zs):
seg.append((x, y, z))
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d
def paths_to_3d_segments(paths, zs=0, zdir='z'):
'''
Convert paths from a collection object to 3D segments.
'''
if not iterable(zs):
zs = np.ones(len(paths)) * zs
segments = []
for path, pathz in zip(paths, zs):
segments.append(path_to_3d_segment(path, pathz, zdir))
return segments
class Line3DCollection(LineCollection):
'''
A collection of 3D lines.
'''
def __init__(self, segments, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.collections.LineCollection`.
'''
LineCollection.__init__(self, segments, *args, **kwargs)
def set_sort_zpos(self,val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
def set_segments(self, segments):
'''
Set 3D segments
'''
self._segments3d = np.asanyarray(segments)
LineCollection.set_segments(self, [])
def do_3d_projection(self, renderer):
'''
Project the points according to renderer matrix.
'''
xyslist = [
proj3d.proj_trans_points(points, renderer.M) for points in
self._segments3d]
segments_2d = [list(zip(xs, ys)) for (xs, ys, zs) in xyslist]
LineCollection.set_segments(self, segments_2d)
# FIXME
minz = 1e9
for (xs, ys, zs) in xyslist:
minz = min(minz, min(zs))
return minz
def draw(self, renderer, project=False):
if project:
self.do_3d_projection(renderer)
LineCollection.draw(self, renderer)
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a LineCollection to a Line3DCollection object."""
segments3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Line3DCollection
col.set_segments(segments3d)
class Patch3D(Patch):
'''
3D patch object.
'''
def __init__(self, *args, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_3d_properties(self, verts, zs=0, zdir='z'):
if not iterable(zs):
zs = np.ones(len(verts)) * zs
self._segment3d = [juggle_axes(x, y, z, zdir) \
for ((x, y), z) in zip(verts, zs)]
self._facecolor3d = Patch.get_facecolor(self)
def get_path(self):
return self._path2d
def get_facecolor(self):
return self._facecolor2d
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
vxs, vys,vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)))
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def draw(self, renderer):
Patch.draw(self, renderer)
class PathPatch3D(Patch3D):
'''
3D PathPatch object.
'''
def __init__(self, path, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, **kwargs)
self.set_3d_properties(path, zs, zdir)
def set_3d_properties(self, path, zs=0, zdir='z'):
Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
self._code3d = path.codes
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
vxs, vys,vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)), self._code3d)
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def get_patch_verts(patch):
"""Return a list of vertices for the path of a patch."""
trans = patch.get_patch_transform()
path = patch.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
else:
return []
def patch_2d_to_3d(patch, z=0, zdir='z'):
"""Convert a Patch to a Patch3D object."""
verts = get_patch_verts(patch)
patch.__class__ = Patch3D
patch.set_3d_properties(verts, z, zdir)
def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
"""Convert a PathPatch to a PathPatch3D object."""
path = pathpatch.get_path()
trans = pathpatch.get_patch_transform()
mpath = trans.transform_path(path)
pathpatch.__class__ = PathPatch3D
pathpatch.set_3d_properties(mpath, z, zdir)
class Patch3DCollection(PatchCollection):
'''
A collection of 3D patches.
'''
def __init__(self, *args, **kwargs):
"""
Create a collection of flat 3D patches with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of patches in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PatchCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PatchCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self,val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.colorConverter.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.colorConverter.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PatchCollection.set_offsets(self, list(zip(vxs, vys)))
if vzs.size > 0 :
return min(vzs)
else :
return np.nan
class Path3DCollection(PathCollection):
'''
A collection of 3D paths.
'''
def __init__(self, *args, **kwargs):
"""
Create a collection of flat 3D paths with its normal vector
pointed in *zdir* direction, and located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of paths in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PathCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PathCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.colorConverter.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.colorConverter.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PathCollection.set_offsets(self, list(zip(vxs, vys)))
        if vzs.size > 0:
            return min(vzs)
        else:
            return np.nan
def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):
"""
Convert a :class:`~matplotlib.collections.PatchCollection` into a
:class:`Patch3DCollection` object
(or a :class:`~matplotlib.collections.PathCollection` into a
:class:`Path3DCollection` object).
Keywords:
    *zs*          The location or locations to place the patches in the
collection along the *zdir* axis. Defaults to 0.
*zdir* The axis in which to place the patches. Default is "z".
*depthshade* Whether to shade the patches to give a sense of depth.
Defaults to *True*.
"""
if isinstance(col, PathCollection):
col.__class__ = Path3DCollection
elif isinstance(col, PatchCollection):
col.__class__ = Patch3DCollection
col._depthshade = depthshade
col.set_3d_properties(zs, zdir)
class Poly3DCollection(PolyCollection):
'''
A collection of 3D polygons.
'''
def __init__(self, verts, *args, **kwargs):
'''
Create a Poly3DCollection.
*verts* should contain 3D coordinates.
Keyword arguments:
zsort, see set_zsort for options.
Note that this class does a bit of magic with the _facecolors
and _edgecolors properties.
'''
self.set_zsort(kwargs.pop('zsort', True))
PolyCollection.__init__(self, verts, *args, **kwargs)
_zsort_functions = {
'average': np.average,
'min': np.min,
'max': np.max,
}
def set_zsort(self, zsort):
'''
Set z-sorting behaviour:
boolean: if True use default 'average'
string: 'average', 'min' or 'max'
'''
if zsort is True:
zsort = 'average'
if zsort is not False:
if zsort in self._zsort_functions:
zsortfunc = self._zsort_functions[zsort]
else:
return False
else:
zsortfunc = None
self._zsort = zsort
self._sort_zpos = None
self._zsortfunc = zsortfunc
def get_vector(self, segments3d):
"""Optimize points for projection"""
si = 0
ei = 0
segis = []
points = []
for p in segments3d:
points.extend(p)
ei = si+len(p)
segis.append((si, ei))
si = ei
        if len(segments3d) > 0:
            xs, ys, zs = list(zip(*points))
        else:
# We need this so that we can skip the bad unpacking from zip()
xs, ys, zs = [], [], []
ones = np.ones(len(xs))
self._vec = np.array([xs, ys, zs, ones])
self._segis = segis
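    # Sketch of what get_vector stores (illustration only): for a single
    # triangle segments3d = [[(0, 0, 0), (1, 0, 0), (0, 1, 0)]] it sets
    # self._segis = [(0, 3)] and self._vec to a 4x3 array of homogeneous
    # coordinates (rows: xs, ys, zs, ones).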
def set_verts(self, verts, closed=True):
'''Set 3D vertices.'''
self.get_vector(verts)
# 2D verts will be updated at draw time
PolyCollection.set_verts(self, [], closed)
def set_3d_properties(self):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
self._sort_zpos = None
self.set_zsort(True)
self._facecolors3d = PolyCollection.get_facecolors(self)
self._edgecolors3d = PolyCollection.get_edgecolors(self)
self._alpha3d = PolyCollection.get_alpha(self)
    def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
def do_3d_projection(self, renderer):
'''
Perform the 3D projection for this object.
'''
# FIXME: This may no longer be needed?
if self._A is not None:
self.update_scalarmappable()
self._facecolors3d = self._facecolors
txs, tys, tzs = proj3d.proj_transform_vec(self._vec, renderer.M)
xyzlist = [(txs[si:ei], tys[si:ei], tzs[si:ei]) \
for si, ei in self._segis]
# This extra fuss is to re-order face / edge colors
cface = self._facecolors3d
cedge = self._edgecolors3d
if len(cface) != len(xyzlist):
cface = cface.repeat(len(xyzlist), axis=0)
if len(cedge) != len(xyzlist):
if len(cedge) == 0:
cedge = cface
cedge = cedge.repeat(len(xyzlist), axis=0)
# if required sort by depth (furthest drawn first)
if self._zsort:
z_segments_2d = [(self._zsortfunc(zs), list(zip(xs, ys)), fc, ec) for
(xs, ys, zs), fc, ec in zip(xyzlist, cface, cedge)]
z_segments_2d.sort(key=lambda x: x[0], reverse=True)
else:
            raise ValueError("z-sorting is disabled; call set_zsort() with "
                             "'average', 'min' or 'max' before projecting")
segments_2d = [s for z, s, fc, ec in z_segments_2d]
PolyCollection.set_verts(self, segments_2d)
self._facecolors2d = [fc for z, s, fc, ec in z_segments_2d]
if len(self._edgecolors3d) == len(cface):
self._edgecolors2d = [ec for z, s, fc, ec in z_segments_2d]
else:
self._edgecolors2d = self._edgecolors3d
# Return zorder value
if self._sort_zpos is not None:
zvec = np.array([[0], [0], [self._sort_zpos], [1]])
ztrans = proj3d.proj_transform_vec(zvec, renderer.M)
return ztrans[2][0]
        elif tzs.size > 0:
# FIXME: Some results still don't look quite right.
# In particular, examine contourf3d_demo2.py
# with az = -54 and elev = -45.
return np.min(tzs)
        else:
return np.nan
def set_facecolor(self, colors):
PolyCollection.set_facecolor(self, colors)
self._facecolors3d = PolyCollection.get_facecolor(self)
set_facecolors = set_facecolor
def set_edgecolor(self, colors):
PolyCollection.set_edgecolor(self, colors)
self._edgecolors3d = PolyCollection.get_edgecolor(self)
set_edgecolors = set_edgecolor
def set_alpha(self, alpha):
"""
        Set the alpha transparencies of the collection. *alpha* must be
a float or *None*.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = mcolors.colorConverter.to_rgba_array(
self._facecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
self._edgecolors = mcolors.colorConverter.to_rgba_array(
self._edgecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
def get_facecolors(self):
return self._facecolors2d
get_facecolor = get_facecolors
def get_edgecolors(self):
return self._edgecolors2d
get_edgecolor = get_edgecolors
def draw(self, renderer):
return Collection.draw(self, renderer)
def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a PolyCollection to a Poly3DCollection object."""
segments_3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Poly3DCollection
col.set_verts(segments_3d)
col.set_3d_properties()
def juggle_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that 2D xs, ys can be plotted in the plane
orthogonal to zdir. zdir is normally x, y or z. However, if zdir
starts with a '-' it is interpreted as a compensation for rotate_axes.
"""
if zdir == 'x':
return zs, xs, ys
elif zdir == 'y':
return xs, zs, ys
elif zdir[0] == '-':
return rotate_axes(xs, ys, zs, zdir)
else:
return xs, ys, zs
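# Sketch of the mapping above (illustration only):
#     juggle_axes([1], [2], [3], 'x')  ->  ([3], [1], [2])
#     juggle_axes([1], [2], [3], 'y')  ->  ([1], [3], [2])
#     juggle_axes([1], [2], [3], 'z')  ->  ([1], [2], [3])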
def rotate_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that the axes are rotated with zdir along
the original z axis. Prepending the axis with a '-' does the
inverse transform, so zdir can be x, -x, y, -y, z or -z
"""
if zdir == 'x':
return ys, zs, xs
elif zdir == '-x':
return zs, xs, ys
elif zdir == 'y':
return zs, xs, ys
elif zdir == '-y':
return ys, zs, xs
else:
return xs, ys, zs
def iscolor(c):
try:
if len(c) == 4 or len(c) == 3:
if iterable(c[0]):
return False
if hasattr(c[0], '__float__'):
return True
    except Exception:
return False
return False
def get_colors(c, num):
"""Stretch the color argument to provide the required number num"""
if type(c) == type("string"):
c = mcolors.colorConverter.to_rgba(c)
if iscolor(c):
return [c] * num
    if len(c) == num:
        return c
elif len(c) == 0: #if edgecolor or facecolor is specified as 'none'
return [[0,0,0,0]] * num
elif iscolor(c[0]):
return [c[0]] * num
else:
raise ValueError('unknown color format %s' % c)
def zalpha(colors, zs):
"""Modify the alphas of the color list according to depth"""
# FIXME: This only works well if the points for *zs* are well-spaced
# in all three dimensions. Otherwise, at certain orientations,
# the min and max zs are very close together.
# Should really normalize against the viewing depth.
colors = get_colors(colors, len(zs))
    if zs.size > 0:
norm = Normalize(min(zs), max(zs))
sats = 1 - norm(zs) * 0.7
colors = [(c[0], c[1], c[2], c[3] * s) for c, s in zip(colors, sats)]
return colors
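# Sketch of the effect above (illustration only): for zs = np.array([0., 1., 2.])
# the color at the minimum z keeps its alpha (factor 1.0), the middle one is
# scaled by 0.65 and the one at the maximum z by 0.3.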
| lgpl-3.0 |
kaiserroll14/301finalproject | main/pandas/io/tests/test_data.py | 9 | 20115 | from __future__ import print_function
from pandas import compat
import warnings
import nose
from nose.tools import assert_equal
from datetime import datetime
import os
import numpy as np
import pandas as pd
from pandas import DataFrame, Timestamp
from pandas.util.testing import (assert_series_equal, assert_produces_warning,
network, assert_frame_equal)
import pandas.util.testing as tm
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
from pandas.io import data as web
from pandas.io.data import DataReader, SymbolWarning, RemoteDataError, _yahoo_codes
if compat.PY3:
from urllib.error import HTTPError
else:
from urllib2 import HTTPError
def _skip_if_no_lxml():
try:
import lxml
except ImportError:
raise nose.SkipTest("no lxml")
def _skip_if_no_bs():
try:
import bs4
import html5lib
except ImportError:
raise nose.SkipTest("no html5lib/bs4")
def assert_n_failed_equals_n_null_columns(wngs, obj, cls=SymbolWarning):
all_nan_cols = pd.Series(dict((k, pd.isnull(v).all()) for k, v in
compat.iteritems(obj)))
n_all_nan_cols = all_nan_cols.sum()
valid_warnings = pd.Series([wng for wng in wngs if wng.category == cls])
assert_equal(len(valid_warnings), n_all_nan_cols)
failed_symbols = all_nan_cols[all_nan_cols].index
msgs = valid_warnings.map(lambda x: x.message)
assert msgs.str.contains('|'.join(failed_symbols)).all()
class TestGoogle(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestGoogle, cls).setUpClass()
cls.locales = tm.get_locales(prefix='en_US')
if not cls.locales:
raise nose.SkipTest("US English locale not available for testing")
@classmethod
def tearDownClass(cls):
super(TestGoogle, cls).tearDownClass()
del cls.locales
@network
def test_google(self):
# asserts that google is minimally working and that it throws
# an exception when DataReader can't get a 200 response from
# google
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
for locale in self.locales:
with tm.set_locale(locale):
panel = web.DataReader("F", 'google', start, end)
self.assertEqual(panel.Close[-1], 13.68)
self.assertRaises(Exception, web.DataReader, "NON EXISTENT TICKER",
'google', start, end)
@network
def test_get_quote_fails(self):
self.assertRaises(NotImplementedError, web.get_quote_google,
pd.Series(['GOOG', 'AAPL', 'GOOG']))
@network
def test_get_goog_volume(self):
for locale in self.locales:
with tm.set_locale(locale):
df = web.get_data_google('GOOG').sort_index()
self.assertEqual(df.Volume.ix['JAN-02-2015'], 1446662)
@network
def test_get_multi1(self):
for locale in self.locales:
sl = ['AAPL', 'AMZN', 'GOOG']
with tm.set_locale(locale):
pan = web.get_data_google(sl, '2012', '2013')
ts = pan.Close.GOOG.index[pan.Close.AAPL < pan.Close.GOOG]
if (hasattr(pan, 'Close') and hasattr(pan.Close, 'GOOG') and
hasattr(pan.Close, 'AAPL')):
self.assertEqual(ts[0].dayofyear, 3)
else:
self.assertRaises(AttributeError, lambda: pan.Close)
@network
def test_get_multi_invalid(self):
sl = ['AAPL', 'AMZN', 'INVALID']
with tm.assert_produces_warning(SymbolWarning):
pan = web.get_data_google(sl, '2012')
self.assertIn('INVALID', pan.minor_axis)
@network
def test_get_multi_all_invalid(self):
sl = ['INVALID', 'INVALID2', 'INVALID3']
with tm.assert_produces_warning(SymbolWarning):
self.assertRaises(RemoteDataError, web.get_data_google, sl, '2012')
@network
def test_get_multi2(self):
with warnings.catch_warnings(record=True) as w:
for locale in self.locales:
with tm.set_locale(locale):
pan = web.get_data_google(['GE', 'MSFT', 'INTC'],
'JAN-01-12', 'JAN-31-12')
result = pan.Close.ix['01-18-12']
assert_n_failed_equals_n_null_columns(w, result)
# sanity checking
self.assertTrue(np.issubdtype(result.dtype, np.floating))
result = pan.Open.ix['Jan-15-12':'Jan-20-12']
self.assertEqual((4, 3), result.shape)
assert_n_failed_equals_n_null_columns(w, result)
@network
def test_dtypes(self):
#GH3995, #GH8980
data = web.get_data_google('F', start='JAN-01-10', end='JAN-27-13')
self.assertTrue(np.issubdtype(data.Open.dtype, np.number))
self.assertTrue(np.issubdtype(data.Close.dtype, np.number))
self.assertTrue(np.issubdtype(data.Low.dtype, np.number))
self.assertTrue(np.issubdtype(data.High.dtype, np.number))
self.assertTrue(np.issubdtype(data.Volume.dtype, np.number))
@network
def test_unicode_date(self):
#GH8967
data = web.get_data_google('F', start='JAN-01-10', end='JAN-27-13')
self.assertEqual(data.index.name, 'Date')
class TestYahoo(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestYahoo, cls).setUpClass()
_skip_if_no_lxml()
@network
def test_yahoo(self):
# asserts that yahoo is minimally working and that it throws
# an exception when DataReader can't get a 200 response from
# yahoo
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
self.assertEqual(web.DataReader("F", 'yahoo', start, end)['Close'][-1],
13.68)
@network
def test_yahoo_fails(self):
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
self.assertRaises(Exception, web.DataReader, "NON EXISTENT TICKER",
'yahoo', start, end)
@network
def test_get_quote_series(self):
df = web.get_quote_yahoo(pd.Series(['GOOG', 'AAPL', 'GOOG']))
assert_series_equal(df.ix[0], df.ix[2])
@network
def test_get_quote_string(self):
df = web.get_quote_yahoo('GOOG')
@network
    def test_get_quote_string_market_cap(self):
_yahoo_codes.update({'MarketCap': 'j1'})
df = web.get_quote_yahoo('GOOG')
self.assertFalse(pd.isnull(df['MarketCap'][0]))
@network
def test_get_quote_stringlist(self):
df = web.get_quote_yahoo(['GOOG', 'AAPL', 'GOOG'])
assert_series_equal(df.ix[0], df.ix[2])
@network
def test_get_components_dow_jones(self):
raise nose.SkipTest('unreliable test, receive partial components back for dow_jones')
df = web.get_components_yahoo('^DJI') #Dow Jones
self.assertIsInstance(df, pd.DataFrame)
self.assertEqual(len(df), 30)
@network
def test_get_components_dax(self):
raise nose.SkipTest('unreliable test, receive partial components back for dax')
df = web.get_components_yahoo('^GDAXI') #DAX
self.assertIsInstance(df, pd.DataFrame)
self.assertEqual(len(df), 30)
self.assertEqual(df[df.name.str.contains('adidas', case=False)].index,
'ADS.DE')
@network
def test_get_components_nasdaq_100(self):
# as of 7/12/13 the conditional will test false because the link is invalid
raise nose.SkipTest('unreliable test, receive partial components back for nasdaq_100')
df = web.get_components_yahoo('^NDX') #NASDAQ-100
self.assertIsInstance(df, pd.DataFrame)
if len(df) > 1:
# Usual culprits, should be around for a while
self.assertTrue('AAPL' in df.index)
self.assertTrue('GOOG' in df.index)
self.assertTrue('AMZN' in df.index)
else:
expected = DataFrame({'exchange': 'N/A', 'name': '@^NDX'},
index=['@^NDX'])
assert_frame_equal(df, expected)
@network
def test_get_data_single_symbol(self):
#single symbol
#http://finance.yahoo.com/q/hp?s=GOOG&a=09&b=08&c=2010&d=09&e=10&f=2010&g=d
# just test that we succeed
web.get_data_yahoo('GOOG')
@network
def test_get_data_interval(self):
# daily interval data
pan = web.get_data_yahoo('XOM', '2013-01-01', '2013-12-31', interval='d')
self.assertEqual(len(pan), 252)
# weekly interval data
pan = web.get_data_yahoo('XOM', '2013-01-01', '2013-12-31', interval='w')
self.assertEqual(len(pan), 53)
        # monthly interval data
pan = web.get_data_yahoo('XOM', '2013-01-01', '2013-12-31', interval='m')
self.assertEqual(len(pan), 12)
# dividend data
pan = web.get_data_yahoo('XOM', '2013-01-01', '2013-12-31', interval='v')
self.assertEqual(len(pan), 4)
# test fail on invalid interval
self.assertRaises(ValueError, web.get_data_yahoo, 'XOM', interval='NOT VALID')
@network
def test_get_data_multiple_symbols(self):
# just test that we succeed
sl = ['AAPL', 'AMZN', 'GOOG']
web.get_data_yahoo(sl, '2012')
@network
def test_get_data_multiple_symbols_two_dates(self):
pan = web.get_data_yahoo(['GE', 'MSFT', 'INTC'], 'JAN-01-12',
'JAN-31-12')
result = pan.Close.ix['01-18-12']
self.assertEqual(len(result), 3)
# sanity checking
self.assertTrue(np.issubdtype(result.dtype, np.floating))
expected = np.array([[18.99, 28.4, 25.18],
[18.58, 28.31, 25.13],
[19.03, 28.16, 25.52],
[18.81, 28.82, 25.87]])
result = pan.Open.ix['Jan-15-12':'Jan-20-12']
self.assertEqual(expected.shape, result.shape)
@network
def test_get_date_ret_index(self):
pan = web.get_data_yahoo(['GE', 'INTC', 'IBM'], '1977', '1987',
ret_index=True)
self.assertTrue(hasattr(pan, 'Ret_Index'))
if hasattr(pan, 'Ret_Index') and hasattr(pan.Ret_Index, 'INTC'):
tstamp = pan.Ret_Index.INTC.first_valid_index()
result = pan.Ret_Index.ix[tstamp]['INTC']
self.assertEqual(result, 1.0)
# sanity checking
self.assertTrue(np.issubdtype(pan.values.dtype, np.floating))
class TestYahooOptions(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestYahooOptions, cls).setUpClass()
_skip_if_no_lxml()
_skip_if_no_bs()
# aapl has monthlies
cls.aapl = web.Options('aapl', 'yahoo')
d = (Timestamp.today() + pd.offsets.MonthBegin(1)).normalize()
cls.year = d.year
cls.month = d.month
cls.expiry = d
cls.expiry2 = d + pd.offsets.MonthBegin(1)
cls.dirpath = tm.get_data_path()
cls.html1 = os.path.join(cls.dirpath, 'yahoo_options1.html')
cls.html2 = os.path.join(cls.dirpath, 'yahoo_options2.html')
cls.html3 = os.path.join(cls.dirpath, 'yahoo_options3.html') #Empty table GH#22
cls.data1 = cls.aapl._option_frames_from_url(cls.html1)['puts']
@classmethod
def tearDownClass(cls):
super(TestYahooOptions, cls).tearDownClass()
del cls.aapl, cls.expiry
@network
def test_get_options_data(self):
# regression test GH6105
self.assertRaises(ValueError, self.aapl.get_options_data, month=3)
self.assertRaises(ValueError, self.aapl.get_options_data, year=1992)
try:
options = self.aapl.get_options_data(expiry=self.expiry)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(options) > 1)
@network
def test_get_near_stock_price(self):
try:
options = self.aapl.get_near_stock_price(call=True, put=True,
expiry=[self.expiry,self.expiry2])
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(options) > 1)
@network
def test_get_call_data(self):
try:
calls = self.aapl.get_call_data(expiry=self.expiry)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(calls) > 1)
@network
def test_get_put_data(self):
try:
puts = self.aapl.get_put_data(expiry=self.expiry)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(puts) > 1)
@network
def test_get_expiry_dates(self):
try:
dates, _ = self.aapl._get_expiry_dates_and_links()
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(dates) > 1)
@network
def test_get_all_data(self):
try:
data = self.aapl.get_all_data(put=True)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(data) > 1)
@network
def test_get_data_with_list(self):
try:
data = self.aapl.get_call_data(expiry=self.aapl.expiry_dates)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(data) > 1)
@network
def test_get_all_data_calls_only(self):
try:
data = self.aapl.get_all_data(call=True, put=False)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(data) > 1)
@network
def test_get_underlying_price(self):
#GH7
try:
options_object = web.Options('^spxpm', 'yahoo')
url = options_object._yahoo_url_from_expiry(options_object.expiry_dates[0])
root = options_object._parse_url(url)
quote_price = options_object._underlying_price_from_root(root)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertIsInstance(quote_price, float)
def test_sample_page_price_quote_time1(self):
#Tests the weekend quote time format
price, quote_time = self.aapl._underlying_price_and_time_from_url(self.html1)
self.assertIsInstance(price, (int, float, complex))
self.assertIsInstance(quote_time, (datetime, Timestamp))
def test_chop(self):
#regression test for #7625
self.aapl.chop_data(self.data1, above_below=2, underlying_price=np.nan)
chopped = self.aapl.chop_data(self.data1, above_below=2, underlying_price=100)
self.assertIsInstance(chopped, DataFrame)
self.assertTrue(len(chopped) > 1)
def test_chop_out_of_strike_range(self):
#regression test for #7625
self.aapl.chop_data(self.data1, above_below=2, underlying_price=np.nan)
chopped = self.aapl.chop_data(self.data1, above_below=2, underlying_price=100000)
self.assertIsInstance(chopped, DataFrame)
self.assertTrue(len(chopped) > 1)
@network
def test_sample_page_price_quote_time2(self):
#Tests the EDT page format
#regression test for #8741
price, quote_time = self.aapl._underlying_price_and_time_from_url(self.html2)
self.assertIsInstance(price, (int, float, complex))
self.assertIsInstance(quote_time, (datetime, Timestamp))
@network
def test_sample_page_chg_float(self):
        # Tests that numeric columns containing commas are handled appropriately
self.assertEqual(self.data1['Chg'].dtype, 'float64')
@network
def test_month_year(self):
try:
data = self.aapl.get_call_data(month=self.month, year=self.year)
except RemoteDataError as e:
raise nose.SkipTest(e)
self.assertTrue(len(data) > 1)
@network
def test_empty_table(self):
#GH22
empty = self.aapl._option_frames_from_url(self.html3)['puts']
self.assertTrue(len(empty) == 0)
class TestOptionsWarnings(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestOptionsWarnings, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestOptionsWarnings, cls).tearDownClass()
@network
def test_options_source_warning(self):
with assert_produces_warning():
aapl = web.Options('aapl')
class TestDataReader(tm.TestCase):
def test_is_s3_url(self):
from pandas.io.common import _is_s3_url
self.assertTrue(_is_s3_url("s3://pandas/somethingelse.com"))
@network
def test_read_yahoo(self):
gs = DataReader("GS", "yahoo")
self.assertIsInstance(gs, DataFrame)
@network
def test_read_google(self):
gs = DataReader("GS", "google")
self.assertIsInstance(gs, DataFrame)
@network
def test_read_fred(self):
vix = DataReader("VIXCLS", "fred")
self.assertIsInstance(vix, DataFrame)
@network
def test_read_famafrench(self):
for name in ("F-F_Research_Data_Factors",
"F-F_Research_Data_Factors_weekly", "6_Portfolios_2x3",
"F-F_ST_Reversal_Factor", "F-F_Momentum_Factor"):
ff = DataReader(name, "famafrench")
self.assertTrue(ff is not None)
self.assertIsInstance(ff, dict)
class TestFred(tm.TestCase):
@network
def test_fred(self):
# Throws an exception when DataReader can't get a 200 response from
# FRED.
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
received = web.DataReader("GDP", "fred", start, end)['GDP'].tail(1)[0]
self.assertTrue(int(received) > 10000)
self.assertRaises(Exception, web.DataReader, "NON EXISTENT SERIES",
'fred', start, end)
@network
def test_fred_nan(self):
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
df = web.DataReader("DFII5", "fred", start, end)
self.assertTrue(pd.isnull(df.ix['2010-01-01'][0]))
@network
def test_fred_parts(self):
raise nose.SkipTest('buggy as of 2/18/14; maybe a data revision?')
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
df = web.get_data_fred("CPIAUCSL", start, end)
self.assertEqual(df.ix['2010-05-01'][0], 217.23)
t = df.CPIAUCSL.values
self.assertTrue(np.issubdtype(t.dtype, np.floating))
self.assertEqual(t.shape, (37,))
@network
def test_fred_part2(self):
expected = [[576.7],
[962.9],
[684.7],
[848.3],
[933.3]]
result = web.get_data_fred("A09024USA144NNBR", start="1915").ix[:5]
tm.assert_numpy_array_equal(result.values, np.array(expected))
@network
def test_invalid_series(self):
name = "NOT A REAL SERIES"
self.assertRaises(Exception, web.get_data_fred, name)
@network
def test_fred_multi(self):
raise nose.SkipTest('buggy as of 2/18/14; maybe a data revision?')
names = ['CPIAUCSL', 'CPALTT01USQ661S', 'CPILFESL']
start = datetime(2010, 1, 1)
end = datetime(2013, 1, 27)
received = web.DataReader(names, "fred", start, end).head(1)
expected = DataFrame([[217.478, 0.99701529, 220.544]], columns=names,
index=[pd.tslib.Timestamp('2010-01-01 00:00:00')])
expected.index.rename('DATE', inplace=True)
assert_frame_equal(received, expected, check_less_precise=True)
@network
def test_fred_multi_bad_series(self):
names = ['NOTAREALSERIES', 'CPIAUCSL', "ALSO FAKE"]
with tm.assertRaises(HTTPError):
DataReader(names, data_source="fred")
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
tawsifkhan/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
Lafunamor/ns3 | src/flow-monitor/examples/wifi-olsr-flowmon.py | 108 | 7439 | # -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <[email protected]>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
    port = 9 # Discard port (RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
urv = ns.core.UniformRandomVariable()
app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 |
jseabold/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 45 | 2433 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
SergioSantGre/project_portfolio | helper.py | 29 | 5643 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
Display Stats of the the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
    # load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
    # Preprocess and save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_training.p')
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
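# Illustration (assumed sizes): with 45 samples and batch_size=20 this yields
# three (features, labels) batches of 20, 20 and 5 samples.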
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
| lgpl-3.0 |
cvsuser-chromium/chromium | ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py | 154 | 8545 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
dump_info = {}
fh = open(filename, 'r')
for line in fh:
if ':' in line:
key, value = line.rstrip().split(':', 1)
dump_info[key] = value
fh.close()
return dump_info
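# For the example file shown above, ReadDumpTxtFile would return (sketch):
# {'plat': 'Win32', 'prod': 'Chromium', 'ptype': 'nacl-loader', 'rept': 'crash svc'}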
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, crash_service_exe,
skip_if_missing=False):
# Find crash_service.exe relative to chrome.exe. This is a bit icky.
browser_dir = os.path.dirname(browser_path)
crash_service_path = os.path.join(browser_dir, crash_service_exe)
if skip_if_missing and not os.path.exists(crash_service_path):
return
proc = subprocess.Popen([crash_service_path,
'--v=1', # Verbose output for debugging failures
'--dumps-dir=%s' % dumps_dir,
'--pipe-name=%s' % windows_pipe_name])
def Cleanup():
# Note that if the process has already exited, this will raise
# an 'Access is denied' WindowsError exception, but
# crash_service.exe is not supposed to do this and such
# behaviour should make the test fail.
proc.terminate()
status = proc.wait()
sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
% (crash_service_exe, status))
cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
if os.path.exists(dir_path):
return [os.path.join(dir_path, name)
for name in os.listdir(dir_path)]
else:
return []
def GetDumpFiles(dumps_dirs):
all_files = [filename
for dumps_dir in dumps_dirs
for filename in ListPathsInDir(dumps_dir)]
sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
for dump_file in all_files:
sys.stdout.write(' %s (size %i)\n'
% (dump_file, os.stat(dump_file).st_size))
return [dump_file for dump_file in all_files
if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
parser = browser_tester.BuildArgParser()
parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
type=int, default=0,
help='The number of crash dumps that we should expect')
parser.add_option('--expected_process_type_for_crash',
dest='expected_process_type_for_crash',
type=str, default='nacl-loader',
help='The type of Chromium process that we expect the '
'crash dump to be for')
# Ideally we would just query the OS here to find out whether we are
# running x86-32 or x86-64 Windows, but Python's win32api module
# does not contain a wrapper for GetNativeSystemInfo(), which is
# what NaCl uses to check this, or for IsWow64Process(), which is
# what Chromium uses. Instead, we just rely on the build system to
# tell us.
parser.add_option('--win64', dest='win64', action='store_true',
help='Pass this if we are running tests for x86-64 Windows')
options, args = parser.parse_args()
temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
def CleanUpTempDir():
browsertester.browserlauncher.RemoveDirectory(temp_dir)
cleanup_funcs.append(CleanUpTempDir)
# To get a guaranteed unique pipe name, use the base name of the
# directory we just created.
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
# This environment variable enables Breakpad crash dumping in
# non-official builds of Chromium.
os.environ['CHROME_HEADLESS'] = '1'
if sys.platform == 'win32':
dumps_dir = temp_dir
# Override the default (global) Windows pipe name that Chromium will
# use for out-of-process crash reporting.
os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
# Launch the x86-32 crash service so that we can handle crashes in
# the browser process.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service.exe')
if options.win64:
# Launch the x86-64 crash service so that we can handle crashes
# in the NaCl loader process (nacl64.exe).
# Skip if missing, since in win64 builds crash_service.exe is 64-bit
# and crash_service64.exe does not exist.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service64.exe',
skip_if_missing=True)
# We add a delay because there is probably a race condition:
# crash_service.exe might not have finished doing
# CreateNamedPipe() before NaCl does a crash dump and tries to
# connect to that pipe.
# TODO(mseaborn): We could change crash_service.exe to report when
# it has successfully created the named pipe.
time.sleep(1)
elif sys.platform == 'darwin':
dumps_dir = temp_dir
os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
elif sys.platform.startswith('linux'):
# The "--user-data-dir" option is not effective for the Breakpad
# setup in Linux Chromium, because Breakpad is initialized before
# "--user-data-dir" is read. So we set HOME to redirect the crash
# dumps to a temporary directory.
home_dir = temp_dir
os.environ['HOME'] = home_dir
options.enable_crash_reporter = True
result = browser_tester.Run(options.url, options)
# Find crash dump results.
if sys.platform.startswith('linux'):
# Look in "~/.config/*/Crash Reports". This will find crash
# reports under ~/.config/chromium or ~/.config/google-chrome, or
# under other subdirectories in case the branding is changed.
dumps_dirs = [os.path.join(path, 'Crash Reports')
for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
else:
dumps_dirs = [dumps_dir]
dmp_files = GetDumpFiles(dumps_dirs)
failed = False
msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
(len(dmp_files), options.expected_crash_dumps))
if len(dmp_files) != options.expected_crash_dumps:
sys.stdout.write(msg)
failed = True
for dump_file in dmp_files:
# Sanity check: Make sure dumping did not fail after opening the file.
msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
if os.stat(dump_file).st_size == 0:
sys.stdout.write(msg)
failed = True
# On Windows, the crash dumps should come in pairs of a .dmp and
# .txt file.
if sys.platform == 'win32':
second_file = dump_file[:-4] + '.txt'
msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
'%r file\n' % (dump_file, second_file))
if not os.path.exists(second_file):
sys.stdout.write(msg)
failed = True
continue
# Check that the crash dump comes from the NaCl process.
dump_info = ReadDumpTxtFile(second_file)
if 'ptype' in dump_info:
msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
% (dump_info['ptype'], options.expected_process_type_for_crash))
if dump_info['ptype'] != options.expected_process_type_for_crash:
sys.stdout.write(msg)
failed = True
else:
sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
failed = True
# TODO(mseaborn): Ideally we would also check that a backtrace
# containing an expected function name can be extracted from the
# crash dump.
if failed:
sys.stdout.write('crash_dump_tester: FAILED\n')
result = 1
else:
sys.stdout.write('crash_dump_tester: PASSED\n')
return result
def MainWrapper():
cleanup_funcs = []
try:
return Main(cleanup_funcs)
finally:
for func in cleanup_funcs:
func()
if __name__ == '__main__':
sys.exit(MainWrapper())
| bsd-3-clause |
aetilley/scikit-learn | examples/plot_kernel_ridge_regression.py | 230 | 6222 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7).astype(int)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
Srisai85/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
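# Illustrative usage sketch for the three helpers above (not part of the
# original module; expected results are shown as comments):
#
#   strip_accents_unicode(u'\xe9l\xe8ve')    # -> u'eleve'
#   strip_accents_ascii(u'\xe9l\xe8ve')      # -> u'eleve'
#   strip_tags(u'<b>bold</b> text')          # -> u' bold  text'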
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
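# Illustrative sketch (not part of the original module): _check_stop_list maps
# the three accepted forms of the `stop_words` parameter onto an actual stop
# list (or None).
#
#   _check_stop_list(None)                  # -> None
#   'the' in _check_stop_list('english')    # -> True
#   _check_stop_list(['foo', 'bar'])        # -> frozenset with 'foo' and 'bar'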
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
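    # Illustrative sketch (not part of the original module): with
    # analyzer='char_wb' and ngram_range=(2, 2), the analyzer built from this
    # mixin pads each word with spaces before extracting character bigrams:
    #
    #   analyze = CountVectorizer(analyzer='char_wb',
    #                             ngram_range=(2, 2)).build_analyzer()
    #   analyze('hi you')   # -> [' h', 'hi', 'i ', ' y', 'yo', 'ou', 'u ']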
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
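# Minimal usage sketch for HashingVectorizer (illustrative only; the output
# width is fixed by n_features, here 2 ** 10 instead of the 2 ** 20 default):
#
#   hv = HashingVectorizer(n_features=2 ** 10)
#   X = hv.transform(["the cat sat", "the dog barked"])
#   X.shape     # -> (2, 1024), a scipy.sparse matrix
#
# Because the transformer is stateless, fit and partial_fit are no-ops and the
# same instance can be reused across streaming batches of documents.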
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
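# Illustrative sketch (not part of the original module): _document_frequency
# returns, for each column of X, the number of rows (documents) in which that
# feature is non-zero.
#
#   import numpy as np
#   import scipy.sparse as sp
#   X = sp.csr_matrix(np.array([[1, 0, 2],
#                               [0, 0, 3]]))
#   _document_frequency(X)   # -> array([1, 0, 2])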
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
        Prune features that are non-zero in more documents than `high` or in
        fewer documents than `low`, modifying the vocabulary and restricting
        it to at most the `limit` most frequent features.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
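# Minimal usage sketch for CountVectorizer (illustrative only):
#
#   cv = CountVectorizer()
#   X = cv.fit_transform(["the cat sat", "the cat ran"])
#   sorted(cv.vocabulary_.items(), key=lambda kv: kv[1])
#       # -> [('cat', 0), ('ran', 1), ('sat', 2), ('the', 3)]
#   X.toarray()
#       # -> [[1, 0, 1, 1],
#       #     [1, 1, 0, 1]]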
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
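# Worked sketch of the smoothed idf used above (illustrative only). With
# smooth_idf=True, both the document frequencies and the document count are
# incremented by one before taking the log:
#
#     idf(t) = log((1 + n_samples) / (1 + df(t))) + 1
#
# For example, with n_samples = 4 and df = [4, 1]:
#
#   import numpy as np
#   np.log((1 + 4.) / (1 + np.array([4., 1.]))) + 1.
#       # -> array([ 1.        ,  1.91629073])
#
# so a term occurring in every document keeps weight 1 rather than being
# suppressed entirely, while rarer terms receive larger idf values.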
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
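# Minimal usage sketch for TfidfVectorizer (illustrative only):
#
#   tv = TfidfVectorizer()
#   X = tv.fit_transform(["the cat sat", "the cat ran"])
#   X.shape     # -> (2, 4); each row is an l2-normalized tf-idf vector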
| bsd-3-clause |
robbymeals/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
aetilley/scikit-learn | sklearn/decomposition/dict_learning.py | 83 | 44062 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
    Each row of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
    code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=gram,
max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary' * X
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
gram = np.dot(dictionary, dictionary.T)
if cov is None:
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
            regularization = min(max(n_features // 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
return _sparse_encode(X, dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init, max_iter=max_iter)
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram, cov[:, this_slice], algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
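# Minimal usage sketch for sparse_encode (illustrative only): encode two
# samples against a fixed three-atom dictionary with orthogonal matching
# pursuit, keeping a single non-zero coefficient per sample.
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   D = rng.randn(3, 4)                                 # (n_components, n_features)
#   D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]   # normalize the atoms
#   X = rng.randn(2, 4)                                 # (n_samples, n_features)
#   code = sparse_encode(X, D, algorithm='omp', n_nonzero_coefs=1)
#   code.shape                                          # -> (2, 3)
#
# X is then approximated by np.dot(code, D), using one atom per sample.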
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
        Pseudo-random number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
        Pseudo-random number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
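# Minimal usage sketch for dict_learning (illustrative only):
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = rng.randn(10, 8)
#   code, dictionary, errors = dict_learning(X, n_components=5, alpha=1,
#                                             random_state=rng)
#   code.shape, dictionary.shape   # -> ((10, 5), (5, 8))
#
# `errors` records the value of the objective at each outer iteration and can
# be inspected to check convergence against `tol`.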
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
dictionary = np.ascontiguousarray(dictionary.T)
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
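# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module).  It shows one minimal call to ``dict_learning_online`` on random
# data; the sizes and parameter values below are arbitrary assumptions.
def _demo_dict_learning_online():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(30, 8)                       # (n_samples, n_features)
    code, dictionary = dict_learning_online(X, n_components=5, alpha=1.,
                                            n_iter=20, batch_size=3,
                                            random_state=rng)
    # code is (n_samples, n_components) and dictionary is
    # (n_components, n_features), so X is approximated by code.dot(dictionary)
    return code.shape, dictionary.shape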
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Rows are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
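# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module): encoding two samples against a small, fixed, unit-norm dictionary.
# The numbers are arbitrary assumptions.
def _demo_sparse_coder():
    import numpy as np
    dictionary = np.eye(3)                     # 3 atoms, already unit norm
    X = np.array([[1., 0., 0.5],
                  [0., 2., 0.]])
    coder = SparseCoder(dictionary, transform_algorithm='omp',
                        transform_n_nonzero_coefs=1)
    code = coder.transform(X)                  # shape (n_samples, n_components)
    # With one nonzero coefficient allowed, each row of ``code`` picks the
    # single atom that best explains the corresponding sample.
    return code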
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
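# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module): fitting ``DictionaryLearning`` on random data and encoding the
# same data.  Parameter values are arbitrary and only keep the run short.
def _demo_dictionary_learning():
    import numpy as np
    rng = np.random.RandomState(42)
    X = rng.randn(20, 6)
    dico = DictionaryLearning(n_components=4, alpha=1., max_iter=10,
                              random_state=rng)
    code = dico.fit(X).transform(X)
    # dico.components_ is (n_components, n_features); ``code`` holds the
    # sparse coefficients of X in that learned dictionary.
    return code.shape, dico.components_.shape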
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
The number of iterations on data batches that have been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
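# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module): an initial online fit followed by a ``partial_fit`` update on a
# later mini-batch, which reuses the stored ``inner_stats_``.  All data and
# parameter values below are arbitrary assumptions.
def _demo_minibatch_dictionary_learning():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(40, 8)
    X_new = rng.randn(5, 8)                    # a mini-batch arriving later
    dico = MiniBatchDictionaryLearning(n_components=6, alpha=1., n_iter=20,
                                       batch_size=5, random_state=0)
    dico.fit(X)                                # initial online pass
    dico.partial_fit(X_new)                    # warm update of the dictionary
    return dico.components_.shape              # (n_components, n_features)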
| bsd-3-clause |
eli-s-goldberg/pbpk_breastmilk | src/pk_milk/pk_milk.py | 2 | 18143 | # coding=utf-8
# !/usr/bin/env python3
import numpy as np
import pandas as pd
from scipy import stats
import warnings
from scipy import integrate
import scipy.interpolate as interpolate
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# todo ( create function for lactation schedule and remove init)
class milk():
def __init__(self,
gens=1,
y_end=100,
lifespan=80,
brth_age=25,
average_lact_time=[0.5] * 5,
k_lac=[1e-2] * 5,
k_elim=[np.log(2) / 5] * 5,
odes_in_each_generation=2):
self.gens = gens
self.y_end = y_end
self.lifespan = lifespan
self.brth_age = brth_age
self.average_lact_time = average_lact_time
self.k_lac = k_lac
self.k_elim = k_elim
self.odes_in_each_generation = odes_in_each_generation
self.y_start = 0
def dt_from_timesteps_(self, timestep_variable,
method='timesteps_per_month'):
if method == 'set_delta_t':
self.delta_t = timestep_variable
self.timesteps_per_month = float(1 / self.delta_t)
print("new delta_t:", self.delta_t)
print("new timesteps/month:", self.timesteps_per_month)
if method == 'timesteps_per_month':
self.timesteps_per_month = timestep_variable
print("new timesteps/month:", self.timesteps_per_month)
self.delta_t = np.float(1 / self.timesteps_per_month)
print("new delta_t:", self.delta_t)
return self
def n_steps_(self):
self.n_steps = np.floor((self.y_end - self.y_start) / self.delta_t) + 1
print("calculated n_steps:", self.n_steps)
return self
def intake_intensity_curve_(self, intake_intensity_data=False,
method='points2spline',
peak_intensity=1, year_peak=False):
if method == 'points2spline':
col_year = np.array(intake_intensity_data[:, 0])
col_congener_intake = np.array(intake_intensity_data[:, 1])
year_one = col_year[0]
# Shift the years to zero
adj_year = col_year - year_one
if (col_year[0] != self.y_start) or (col_year[-1] != self.y_end):
warnings.warn('simulation time misaligned with intake curve.')
intake_intensity_spline = interpolate.InterpolatedUnivariateSpline(
adj_year, col_congener_intake)
if method == 'asymettric_exp_up_and_down':
num_steps_up = (year_peak - self.y_start) / self.delta_t
x_up = np.array([self.y_start, year_peak - self.y_start])
y_up = np.log([1e-20, peak_intensity])
(slope_up, intercept_up, r_value, p_value,
std_err) = stats.linregress(x_up, y_up)
# create the regression line
x_up_interp = np.linspace(self.y_start, year_peak, num_steps_up)
y_reg_line_up = np.polyval([slope_up, intercept_up], x_up_interp)
# take the exp of the reg line to return it to an exp fit
upswing_reg_lin = np.exp(y_reg_line_up)
# the remaining intensity is set to zero
num_steps_d = (self.y_end - (
year_peak + self.delta_t)) / self.delta_t
x_d_interp = np.linspace(year_peak + self.delta_t, self.y_end,
num_steps_d)
x_d = np.array([year_peak + self.delta_t, self.y_end])
y_d = np.log([peak_intensity, 1e-10])
(
slope_d, intercept_d, r_value, p_value,
std_err) = stats.linregress(
x_d, y_d)
y_reg_line_down = np.polyval([slope_d, intercept_d], x_d_interp)
dswing_reg_lin = np.exp(y_reg_line_down)
# concatenate the up and down swing sides
y_up_down = np.concatenate((upswing_reg_lin, dswing_reg_lin[1:]))
x_up_down = np.concatenate((x_up_interp, x_d_interp[1:]))
intake_intensity_spline = interpolate.InterpolatedUnivariateSpline(
x_up_down, y_up_down)
self.intake_intensity_curve = intake_intensity_spline
return self
def biomonitoring_eval_(self, biomonitoring_data,
exponential_fit=lambda x, a, c, d: a * np.exp(
-c * x) + d, p0=(1e-6, 1e-6, 1),
assumed_kinetic_order=1, method='lin2exp'):
"""
Evaluating biomonitoring data for an individual congener.
:biomonitoring_data: year column and congener blood concentration
column e.g.
:example:
+----+-----+
|Year|Conc.|
+----+-----+
|1996|13.92|
+----+-----+
|1997|17.43|
+----+-----+
|1998|11.33|
+----+-----+
:exponential_fit: fit function
:p0: initial conditions for non-linear fit
:assumed_kinetic_order: first or second order
:param method: lin2exp or exp
:return: year array, fitted y value array, slope
"""
col_year = np.array(biomonitoring_data[:, 0]).flatten('C')
col_congener = np.array(biomonitoring_data[:, 1]).flatten('C')
year_one = col_year[0]
if assumed_kinetic_order == 1:
# Shift the years to zero
adj_year = col_year - year_one
if method == 'lin2exp':
log_congener = np.log(col_congener)
slope, intercept, r_value, p_value, std_err = stats.linregress(
adj_year, log_congener)
return adj_year, np.polyval([slope, intercept],
adj_year), -slope, -r_value
elif method == 'exp':
(popt, pcov) = curve_fit(exponential_fit, col_year,
col_congener, p0=p0)
return adj_year, exponential_fit(adj_year, *popt), np.exp(
-popt[1])
elif assumed_kinetic_order == 2:
log_year = np.log(col_year)
log_congener = np.log(col_congener)
(slope, intercept, r_value, p_value, std_err) = stats.linregress(
log_year, log_congener)
return log_year, np.polyval([slope, intercept],
log_year), -slope, -r_value
def age_timeline_(self, brth_sched=False):
"""
Generate generational critical age definitions
:brth_sched: user provided birth schedule array
:example
brth_sched=[0, 15, 25, 35]
:return: self
"""
print('default interval age at which mother gives birth: ',
self.brth_age, 'years')
self.brth_sched = np.linspace(0, self.brth_age * self.gens,
self.gens + 1)
print('calculated birth time-table based on birth age:',
self.brth_sched, 'years')
self.cbtg_child = list(
map(lambda x: x + self.brth_age, self.brth_sched))
print('generational birth schedule:', self.cbtg_child, 'years')
self.aigd_mother = list(
map(lambda x: x + self.lifespan, self.brth_sched))
print('generational death schedule:', self.aigd_mother, 'years')
if brth_sched:
self.brth_sched = brth_sched
print('provided birth time-table:', self.brth_sched, 'years')
if self.gens < len(self.brth_sched):
print(
'more birth scheduling info was provided than gens '
'calculated.')
print(
'increase number of gens else birth scheduling info will '
'be ignored.')
self.brth_sched = self.brth_sched[:(self.gens + 1)]
warnings.warn(
'only the following section of the birth schedule will '
'be used: {}'.format(self.brth_sched))
if self.gens > len(self.brth_sched):
warnings.warn(
'insufficient birth scheduling information. increase gens '
'to match.')
return self
def lipid_mass_from_bw_and_lipid_fraction(self,
bodyweight_and_lipid_fraction_data):
"""
:param bodyweight_and_lipid_fraction_data:
:example: matrix format:
bodyweight_kg, lipid_fraction
3.79233266687, 0.213947530915
4.67272833393, 0.225984883013
5.46991188708, 0.237058988856
:return: array
"""
bodyweight_in_kg = np.array(
bodyweight_and_lipid_fraction_data[:, 0]).flatten('C')
lipid_fraction = np.array(
bodyweight_and_lipid_fraction_data[:, 1]).flatten('C')
# multiply every bodyweight by its corresponding lipid fraction
lipid_mass = np.multiply(bodyweight_in_kg, lipid_fraction)
return lipid_mass
def age_splines_(self, lipid_mass_array):
"""
create a list of age splines for each generation.
:param lipid_mass_array: lipid-mass array, e.g. from lipid_mass_from_bw_and_lipid_fraction
:return: two arrays of spline objects describing each generation's lipid mass or
lipid mass change (derivative)
"""
age_spline_derivative = pd.DataFrame()
age_spline = pd.DataFrame()
num_steps_before = []
num_steps_after = []
for gen in range(0, self.gens):
num_steps_before.append(
np.int((self.brth_sched[gen] - self.y_start) / self.delta_t))
num_steps_after.append(
np.int((self.y_end - self.aigd_mother[gen]) / self.delta_t))
for gen in range(0, self.gens):
y_before = []
x_before = []
y_after = []
x_after = []
age_spline_n = pd.DataFrame()
age_spline_d = pd.DataFrame()
if num_steps_before[gen] == 0:
y_gen = lipid_mass_array
x_gen = np.linspace(self.brth_sched[gen], self.aigd_mother[gen],
len(y_gen))
if num_steps_after[gen] > 0:
y_after = np.zeros(np.int(num_steps_after[gen]))
x_after = np.linspace(self.aigd_mother[gen], self.y_end,
num_steps_after[gen])
# concatenate everything, but remove overlaps.
y_all = np.concatenate([y_before[:-1], y_gen, y_after[1:-1]])
x_all = np.concatenate([x_before[:-1], x_gen, x_after[1:-1]])
age_spline_n = interpolate.InterpolatedUnivariateSpline(x_all,
y_all,
k=1)
age_spline_d = age_spline_n.derivative()
elif num_steps_before[gen] > 0:
y_before = np.zeros(np.int(num_steps_before[gen]))
x_before = np.linspace(self.brth_sched[gen - 1],
self.brth_sched[gen],
num_steps_before[gen])
y_gen = lipid_mass_array
x_gen = np.linspace(self.brth_sched[gen], self.aigd_mother[gen],
len(y_gen))
if num_steps_after[gen] > 0:
y_after = np.zeros(np.int(num_steps_after[gen]))
x_after = np.linspace(self.aigd_mother[gen], self.y_end,
num_steps_after[gen])
# concatenate everything, but remove overlaps.
y_all = np.concatenate([y_before[:-1], y_gen, y_after[1:-1]])
x_all = np.concatenate([x_before[:-1], x_gen, x_after[1:-1]])
age_spline_n = interpolate.InterpolatedUnivariateSpline(x_all,
y_all,
k=1)
age_spline_d = age_spline_n.derivative()
age_spline = age_spline.append([age_spline_n], 1)
age_spline_derivative = age_spline_derivative.append([age_spline_d],
1)
self.age_spline = age_spline[0].ravel()
self.age_spline_derivative = age_spline_derivative[0].ravel()
return self
def body_mass(self, t, y):
'''
The first generation's mass balance should be specified above.
Every other generations balance, assuming it's the same, can be
specified here.
Note that 'cntr' variable is a loop tracking tool. To specify that
the previous box's mass balance should be employed, use
itr_mtrx[0][cntr]-X), where X is the total number of mass balances - 1.
This is because the array goes from 0 to 3, with length 4.
Use the np.int() to surround the itr_mtrx calls because arrays should
be tracked with integers, not floats.
You can add more mass balances, but do not change dydt_matrix[0][gen]
label. This is because these are placeholder variables that are
reorganized from an array to a matrix.
For notes:
aig_mother[gen] # age the mother gives birth
cbtg_child[gen] # year child is born from previous gen
aigd_mother[gen] # age of death of the mother
'''
cntr = 0
# auto setup for multi-generational ode
odes_per_gen = range(0, self.odes_in_each_generation)
dydt_matrix = np.zeros(shape=(len(odes_per_gen), self.gens),
dtype=object)
order_array_counter = np.array(range(0, self.gens * len(odes_per_gen)))
itr_mtrx = order_array_counter.reshape((len(odes_per_gen), self.gens),
order='F')
def k_lac_m2c_(t, gen):
if np.all((t >= self.cbtg_child[gen]) & (
t <= self.cbtg_child[gen] + self.average_lact_time[
gen])):
return self.k_lac[gen]
else:
return 0.0
for gen in range(0, self.gens):
k_lac_current_gen_to_next_gen = k_lac_m2c_(t, gen)
if gen == 0:
dydt_matrix[0][cntr] = self.age_spline_derivative[0](t)
dydt_matrix[1][cntr] = self.intake_intensity_curve(t) * y[0] \
- self.k_elim[0] * y[0] * y[1] \
- k_lac_current_gen_to_next_gen * y[0] * \
y[1]
cntr = np.int(cntr + 1)
elif gen >= 1:
# itr matrix # is the ode within the generation. cntr is the
# generation
k_lac_current_gen_to_next_gen = k_lac_m2c_(t, gen)
k_lac_previous_gen_to_current_gen = k_lac_m2c_(t, (gen - 1))
dydt_matrix[0][cntr] = self.age_spline_derivative[gen](t)
dydt_matrix[1][cntr] = self.intake_intensity_curve(t) * y[
np.int(itr_mtrx[0][cntr])] \
- self.k_elim[gen] * y[
np.int(itr_mtrx[0][cntr])] * y[np.int(itr_mtrx[1][cntr])] \
+ k_lac_previous_gen_to_current_gen * y[
np.int(itr_mtrx[0][cntr - 1])] * y[
np.int(itr_mtrx[1][cntr - 1])] \
- k_lac_current_gen_to_next_gen * y[
np.int(itr_mtrx[0][cntr])] * y[
np.int(itr_mtrx[1][cntr])]
cntr = np.int(cntr + 1)
dydt = np.ravel(dydt_matrix, order='F')
return dydt
def generation_mass_balance(self, quickplot=False):
t = np.zeros((np.int(self.n_steps), 1))
# use ``vode`` with "backward differentiation formula" or 'bdf'
r = integrate.ode(self.body_mass).set_integrator('vode',
order=4,
nsteps=self.n_steps,
min_step=1e-30,
method='bdf')
y0 = np.zeros((np.int(self.gens * self.odes_in_each_generation), 1))
# because the model starts at year 0, adjust boundary conditions if
# age_spline != 0 @ 0.
if self.y_start == 0:
y0[[0]] = self.age_spline[0](0)
r.set_initial_value(y0, self.y_start)
# create vectors to store trajectories
ode_init = np.zeros((np.int(
self.n_steps * self.gens * self.odes_in_each_generation), 1))
ode_init_matrix = ode_init.reshape(
(self.odes_in_each_generation * self.gens,
np.int(self.n_steps)), order='F')
# initialize k for while loop
iter_odes = range(0, self.odes_in_each_generation * self.gens, 1)
k = 1
while r.successful() and k < self.n_steps:
r.integrate(r.t + self.delta_t)
t[k] = r.t
for ode in iter_odes:
ode_init_matrix[ode][k] = r.y[ode]
k += 1
if quickplot:
for ode in iter_odes:
ax1 = plt.subplot(len(iter_odes), 1, iter_odes[ode] + 1)
plt.plot(t, ode_init_matrix[ode][:])
ax1.plot(t, ode_init_matrix[ode][:])
ax1.set_xlim(self.y_start, self.y_end)
ax1.grid('on')
plt.xlim(self.y_start, self.y_end)
plt.show()
self.dydt_solution = ode_init_matrix
return self
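# ---------------------------------------------------------------------------
# Illustrative driver sketch (added for clarity; not part of the original
# module).  It shows one plausible way to chain the methods above for a
# single generation; the time step, intake peak and body-weight/lipid table
# are invented placeholders, not values from the original study, and the run
# depends on the same older numpy/pandas stack as the module itself.
def _demo_milk_run():
    m = milk(gens=1, y_end=100, lifespan=80, brth_age=25)
    m.dt_from_timesteps_(1.0, method='timesteps_per_month')
    m.n_steps_()
    m.intake_intensity_curve_(method='asymettric_exp_up_and_down',
                              peak_intensity=1.0, year_peak=40.0)
    m.age_timeline_()
    # hypothetical body-weight [kg] / lipid-fraction table over one lifespan
    bw_lipid = np.column_stack([np.linspace(3.5, 75.0, 200),
                                np.linspace(0.20, 0.35, 200)])
    lipid_mass = m.lipid_mass_from_bw_and_lipid_fraction(bw_lipid)
    m.age_splines_(lipid_mass)
    m.generation_mass_balance(quickplot=False)
    return m.dydt_solution.shape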
| bsd-2-clause |
geomagpy/magpy | magpy/lib/format_json.py | 1 | 3038 | """
MagPy
JSON input filter
Written by Roman Leonhardt June 2012
- contains test and read functions
"""
from __future__ import print_function
import json
from matplotlib.dates import date2num
import numpy as np
from magpy.stream import KEYLIST, DataStream, loggerlib, testTimeString
def isJSON(filename):
"""
Checks whether a file is JSON format.
"""
try:
jsonfile = open(filename, 'r')
j = json.load(jsonfile)
except:
return False
try:
if j.get("domain").get("type") == 'Domain':
# Found Coverage json - use separate filter
return False
except:
pass
return True
def readJSON(filename, headonly=False, **kwargs):
"""
Reading JSON format data.
"""
stream = DataStream()
header = {}
array = [[] for key in KEYLIST]
with open(filename, 'r') as jsonfile:
dataset = json.load(jsonfile)
loggerlib.info('Read: %s, Format: %s ' % (filename, "JSON"))
fillkeys = ['var1', 'var2', 'var3', 'var4', 'var5', 'x', 'y', 'z', 'f']
datakeys = dataset[0]
keydict = {}
for i, key in enumerate(datakeys):
if 'time' in key:
keydict[i] = 'time'
elif key == 'density':
keydict[i] = 'var1'
fillkeys.pop(fillkeys.index('var1'))
elif key == 'speed':
keydict[i] = 'var2'
fillkeys.pop(fillkeys.index('var2'))
elif key == 'temperature':
keydict[i] = 'var3'
fillkeys.pop(fillkeys.index('var3'))
elif 'bx' in key.lower():
keydict[i] = 'x'
fillkeys.pop(fillkeys.index('x'))
elif 'by' in key.lower():
keydict[i] = 'y'
fillkeys.pop(fillkeys.index('y'))
elif 'bz' in key.lower():
keydict[i] = 'z'
fillkeys.pop(fillkeys.index('z'))
elif 'bt' in key.lower():
keydict[i] = 'f'
fillkeys.pop(fillkeys.index('f'))
else:
try:
keydict[i] = fillkeys.pop(0)
except IndexError:
loggerlib.warning("CAUTION! Out of available keys for data. {} will not be contained in stream.".format(key))
print("CAUTION! Out of available keys for data. {} will not be contained in stream.".format(key))
if 'time' in key:
data = [date2num(testTimeString(str(x[i]))) for x in dataset[1:]]
else:
data = [np.nan if x[i] is None else float(x[i]) for x in dataset[1:]]
array[KEYLIST.index(keydict[i])] = data
header['col-'+keydict[i]] = key
header['unit-col-'+keydict[i]] = ''
for idx, elem in enumerate(array):
array[idx] = np.asarray(array[idx])
stream = DataStream([],header,np.asarray(array))
return stream
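# ----------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original module).
# In practice these functions are dispatched through magpy's generic read
# machinery; the file name below is only a placeholder.
def _demo_read_json(filename='example_solarwind.json'):
    if isJSON(filename):
        stream = readJSON(filename)
        # column labels such as 'col-time' are filled in by readJSON
        return stream
    return None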
| bsd-3-clause |
andipeng/MagnePlane | paper/images/trade_scripts/pressure_zoom_writer.py | 2 | 7103 | import numpy as np
import matplotlib.pylab as plt
from openmdao.api import Group, Problem, IndepVarComp
from hyperloop.Python import tube_and_pod
# def create_problem(component):
# root = Group()
# prob = Problem(root)
# prob.root.add('comp', component)
# return prob
# class PressureTradeStudy(object):
# def test_case1_vs_npss(self):
# component = tube_and_pod.TubeAndPod()
# prob = create_problem(component)
if __name__ == '__main__':
prob = Problem()
root = prob.root = Group()
root.add('TubeAndPod', tube_and_pod.TubeAndPod())
params = (('tube_pressure', 850.0, {'units' : 'Pa'}),
('pressure_initial', 760.2, {'units' : 'torr'}),
('num_pods', 18.),
('pwr', 18.5, {'units' : 'kW'}),
('speed', 163333.3, {'units' : 'L/min'}),
('time_down', 1440.0, {'units' : 'min'}),
('gamma', .8, {'units' : 'unitless'}),
('pump_weight', 715.0, {'units' : 'kg'}),
('electricity_price', 0.13, {'units' : 'USD/(kW*h)'}),
('tube_thickness', .0415014, {'units' : 'm'}),
('tube_length', 480000., {'units' : 'm'}),
('vf', 286.85, {'units' : 'm/s'}),
('v0', 286.85-15.0, {'units' : 'm/s'}),
('time_thrust', 1.5, {'units' : 's'}),
('pod_mach', .8, {'units': 'unitless'}),
('comp_inlet_area', 2.3884, {'units': 'm**2'}),
('comp_PR', 6.0, {'units': 'unitless'}),
('PsE', 0.05588, {'units': 'psi'}),
('des_time', 1.0),
('time_of_flight', 1.0),
('motor_max_current', 800.0),
('motor_LD_ratio', 0.83),
('motor_oversize_factor', 1.0),
('inverter_efficiency', 1.0),
('battery_cross_section_area', 15000.0, {'units': 'cm**2'}),
('n_passengers', 28.),
('A_payload', 2.3248, {'units' : 'm**2'}),
('r_pylon', 0.232, {'units' : 'm'}),
('h', 10.0, {'units' : 'm'}),
('vel_b', 23.0, {'units': 'm/s'}),
('h_lev', 0.01, {'units': 'm'}),
('vel', 286.86, {'units': 'm/s'}),
('pod_period', 120.0, {'units' : 's'}),
('ib', .04),
('bm', 20.0, {'units' : 'yr'}),
('track_length', 600.0, {'units' : 'km'}),
('avg_speed', 286.86, {'units' : 'm/s'}),
('depth', 10.0, {'units' : 'm'}),
('land_length', 600.0e3, {'units' : 'm'}),
('water_length', 0.0e3, {'units' : 'm'}),
('W', 1.0, {'units' : 'kg/s'}),
('operating_time', 16.0*3600.0, {'units' : 's'})
)
prob.root.add('des_vars', IndepVarComp(params))
prob.root.connect('des_vars.tube_pressure', 'TubeAndPod.tube_pressure')
prob.root.connect('des_vars.pressure_initial', 'TubeAndPod.pressure_initial')
prob.root.connect('des_vars.num_pods', 'TubeAndPod.num_pods')
prob.root.connect('des_vars.pwr','TubeAndPod.pwr')
prob.root.connect('des_vars.speed', 'TubeAndPod.speed')
prob.root.connect('des_vars.time_down', 'TubeAndPod.time_down')
prob.root.connect('des_vars.gamma','TubeAndPod.gamma')
prob.root.connect('des_vars.pump_weight','TubeAndPod.pump_weight')
prob.root.connect('des_vars.electricity_price','TubeAndPod.electricity_price')
prob.root.connect('des_vars.tube_thickness', 'TubeAndPod.tube_thickness')
prob.root.connect('des_vars.tube_length', 'TubeAndPod.tube_length')
prob.root.connect('des_vars.h', 'TubeAndPod.h')
prob.root.connect('des_vars.r_pylon', 'TubeAndPod.r_pylon')
prob.root.connect('des_vars.vf', 'TubeAndPod.vf')
prob.root.connect('des_vars.v0', 'TubeAndPod.v0')
prob.root.connect('des_vars.time_thrust', 'TubeAndPod.time_thrust')
prob.root.connect('des_vars.pod_mach', 'TubeAndPod.pod_mach')
prob.root.connect('des_vars.comp_inlet_area', 'TubeAndPod.comp_inlet_area')
prob.root.connect('des_vars.comp_PR', 'TubeAndPod.comp.map.PRdes')
prob.root.connect('des_vars.PsE', 'TubeAndPod.nozzle.Ps_exhaust')
prob.root.connect('des_vars.des_time', 'TubeAndPod.des_time')
prob.root.connect('des_vars.time_of_flight', 'TubeAndPod.time_of_flight')
prob.root.connect('des_vars.motor_max_current', 'TubeAndPod.motor_max_current')
prob.root.connect('des_vars.motor_LD_ratio', 'TubeAndPod.motor_LD_ratio')
prob.root.connect('des_vars.motor_oversize_factor', 'TubeAndPod.motor_oversize_factor')
prob.root.connect('des_vars.inverter_efficiency', 'TubeAndPod.inverter_efficiency')
prob.root.connect('des_vars.battery_cross_section_area', 'TubeAndPod.battery_cross_section_area')
prob.root.connect('des_vars.n_passengers', 'TubeAndPod.n_passengers')
prob.root.connect('des_vars.A_payload', 'TubeAndPod.A_payload')
prob.root.connect('des_vars.vel_b', 'TubeAndPod.vel_b')
prob.root.connect('des_vars.h_lev', 'TubeAndPod.h_lev')
prob.root.connect('des_vars.vel', 'TubeAndPod.vel')
prob.root.connect('des_vars.pod_period', 'TubeAndPod.cost.pod_period')
prob.root.connect('des_vars.ib', 'TubeAndPod.cost.ib')
prob.root.connect('des_vars.bm', 'TubeAndPod.cost.bm')
prob.root.connect('des_vars.track_length', 'TubeAndPod.track_length')
prob.root.connect('des_vars.avg_speed', 'TubeAndPod.cost.avg_speed')
prob.root.connect('des_vars.land_length', 'TubeAndPod.land_length')
prob.root.connect('des_vars.water_length', 'TubeAndPod.water_length')
prob.root.connect('des_vars.operating_time', 'TubeAndPod.operating_time')
prob.root.connect('des_vars.W', 'TubeAndPod.fl_start.W')
prob.setup()
p_tunnel = np.linspace(40.0, 500.0, num = 50, endpoint = False)
A_tube = np.zeros((1, len(p_tunnel)))
Re = np.zeros((1, len(p_tunnel)))
T_tunnel = np.zeros((1, len(p_tunnel)))
L_pod = np.zeros((1, len(p_tunnel)))
Drag = np.zeros((1, len(p_tunnel)))
power = np.zeros((1, len(p_tunnel)))
steady_vac = np.zeros((1,len(p_tunnel)))
total_energy = np.zeros((1, len(p_tunnel)))
thrust = np.zeros((1, len(p_tunnel)))
for i in range(len(p_tunnel)):
prob['des_vars.tube_pressure'] = p_tunnel[i]
prob.run()
A_tube[0,i] = prob['TubeAndPod.pod.A_tube']
Re[0,i] = prob['TubeAndPod.pod.pod_mach.Re']
T_tunnel[0,i] = prob['TubeAndPod.tube.temp_boundary']
L_pod[0,i] = prob['TubeAndPod.L_pod']
power[0,i] = -1.0*prob['TubeAndPod.pod.cycle.comp.power']
steady_vac[0,i] = -1.0*prob['TubeAndPod.tube.comp.power']
total_energy[0,i] = prob['TubeAndPod.cost.total_energy_cost']
print(i)
np.savetxt('../../../paper/images/data_files/pressure_zoom/p_tunnel.txt', p_tunnel, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/Re.txt', Re, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/A_tube.txt', A_tube, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/T_tunnel.txt', T_tunnel, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/L_pod.txt', L_pod, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/comp_power.txt', power, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/vac_power.txt', steady_vac, fmt = '%f', delimiter = '\t', newline = '\r\n')
np.savetxt('../../../paper/images/data_files/pressure_zoom/total_energy.txt', total_energy, fmt = '%f', delimiter = '\t', newline = '\r\n')
| apache-2.0 |
AlexRobson/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
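# ----------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original module):
# minimal use of two of the estimators exported above on a tiny synthetic
# regression problem.  The data and alpha values are arbitrary assumptions.
def _demo_linear_models():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = X.dot([1.5, -2.0, 0.0]) + 0.01 * rng.randn(50)
    ridge = Ridge(alpha=1.0).fit(X, y)
    lasso = Lasso(alpha=0.1).fit(X, y)
    # Lasso is expected to drive the third (irrelevant) coefficient to zero,
    # while Ridge only shrinks it.
    return ridge.coef_, lasso.coef_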
| bsd-3-clause |
UK-MAC/mega-stream | scripts/heatmap.py | 1 | 2288 | #
#
# Copyright 2016 Tom Deakin, University of Bristol
#
# This file is part of mega-stream.
#
# mega-stream is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mega-stream is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mega-stream. If not, see <http://www.gnu.org/licenses/>.
#
#
# This aims to investigate the limiting factor for a simple kernel, in particular
# where bandwidth limits are not reached and latency becomes the dominating factor.
#
#
import numpy
import matplotlib.pyplot as plt
data = numpy.zeros((8,5))
data[0] = [71103.0, 114238.5, 94292.4, 92105.7, 52930.6]
data[1] = [147649.4, 223801.5, 251318.1, 227114.9, 196121.0]
data[2] = [252762.3, 311192.7, 294210.3, 227833.1, 185339.1]
data[3] = [310676.5, 395393.0, 302705.0, 195018.7, 0.0]
data[4] = [351479.6, 332399.7, 241249.2, 183720.3, 0.0]
data[5] = [439309.4, 294268.8, 191220.3, 168287.6, 0.0]
data[6] = [411714.6, 212903.5, 167718.5, 0.0, 0.0]
data[7] = [270262.7, 181380.7, 145228.9, 0.0, 0.0]
data *= 1.0E-3
fig, ax = plt.subplots()
plt.pcolor(data, cmap='GnBu')
ax.set_xticks(numpy.arange(data.shape[1]) + 0.5)
ax.set_yticks(numpy.arange(data.shape[0]) + 0.5)
ax.set_xticklabels([4, 8, 16, 32, 64])
ax.set_yticklabels([8, 16, 32, 64, 128, 256, 512, 1024])
ax.set_xlabel('Middle size')
ax.set_ylabel('Inner size')
plt.title('Outer size=64')
cbr = plt.colorbar()
cbr.ax.set_ylabel('Bandwidth GB/s')
# Add data labels
for i in range(data.shape[1]):
for j in range(data.shape[0]):
if (data[j][i] != 0.0):
plt.text(i + 0.5, j + 0.5, '%.1f' % (data[j][i]),
ha='center', va='center',
size='small', color='black', weight='bold')
else:
plt.text(i + 0.5, j + 0.5, '-',
ha='center', va='center',
size='small', color='black', weight='bold')
#fig.set_tight_layout(True)
plt.savefig('heatmap.pdf')
| gpl-3.0 |
vivekmishra1991/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
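# A small sanity-check sketch (added for illustration; not part of the
# original example): the operator maps a flattened l_x-by-l_x image to
# n_dir * l_x projection values, as stated in the docstring above.
def _check_projection_operator(l_x=16, n_dir=8):
    op = build_projection_operator(l_x, n_dir)
    image = np.ones((l_x * l_x, 1))        # a constant test image
    sinogram = op * image                  # forward projection
    # expected shapes: roughly (n_dir * l_x, l_x ** 2) and (n_dir * l_x, 1)
    return op.shape, sinogram.shape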
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
escherba/flaubert | setup.py | 1 | 3309 | import re
import itertools
from setuptools import setup, find_packages
from pkg_resources import resource_string
from glob import glob
# dependency links
SKIP_RE = re.compile(r'^\s*(?:-\S+)\s+(.*)$')
# Regex groups: 0: URL part, 1: package name, 2: package version
EGG_RE = re.compile(r'^(git\+https?://[^#]+)(?:#egg=([a-z0-9_.]+)(?:-([a-z0-9_.-]+))?)?$')
# Regex groups: 0: URL part, 1: package name, 2: branch name
URL_RE = re.compile(r'^\s*(https?://[\w\.]+.*/([^\/]+)/archive/)([^\/]+).zip$')
# our custom way of specifying extra requirements in separate text files
EXTRAS_RE = re.compile(r'.*\bextras\.(\w+)\.txt$')
def parse_reqs(reqs):
"""Parse requirements.txt files into lists of requirements and dependencies
"""
pkg_reqs = []
dep_links = []
for req in reqs:
# find things like `--find-links <URL>`
dep_link_info = SKIP_RE.match(req)
if dep_link_info is not None:
url = dep_link_info.group(1)
dep_links.append(url)
continue
# add packages of form:
# git+https://github.com/escherba/pymaptools#egg=pymaptools-0.1.15
egg_info = EGG_RE.match(req)
if egg_info is not None:
url, egg, version = egg_info.group(0, 2, 3)
#if version is None:
# pkg_reqs.append(egg)
#else:
# pkg_reqs.append(egg + '==' + version)
dep_links.append(url)
continue
# add packages of form:
# https://github.com/escherba/matplotlib/archive/qs_fix_build.zip
zip_info = URL_RE.match(req)
if zip_info is not None:
url, pkg = zip_info.group(0, 2)
pkg_reqs.append(pkg)
dep_links.append(url)
continue
pkg_reqs.append(req)
return pkg_reqs, dep_links
def build_extras(glob_pattern):
"""Generate extras_require mapping
"""
fnames = glob(glob_pattern)
result = dict()
dep_links = []
for fname in fnames:
extras_match = EXTRAS_RE.match(fname)
if extras_match is not None:
extras_file = extras_match.group(0)
extras_name = extras_match.group(1)
with open(extras_file, 'r') as fhandle:
result[extras_name], deps = parse_reqs(fhandle.readlines())
dep_links.extend(deps)
return result, dep_links
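# Illustrative sketch (added for clarity; not part of the original setup
# script): what ``parse_reqs`` does with two typical requirement lines.
# Wrapped in a function so nothing runs at import/setup time.
def _demo_parse_reqs():
    reqs = [
        'numpy>=1.8',
        'git+https://github.com/escherba/pymaptools#egg=pymaptools-0.1.15',
    ]
    pkg_reqs, dep_links = parse_reqs(reqs)
    # pkg_reqs  -> ['numpy>=1.8']
    # dep_links -> the VCS line, routed to dependency_links instead
    return pkg_reqs, dep_links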
INSTALL_REQUIRES, INSTALL_DEPS = parse_reqs(
resource_string(__name__, 'requirements.txt').splitlines())
TESTS_REQUIRE, TESTS_DEPS = parse_reqs(
resource_string(__name__, 'dev-requirements.txt').splitlines())
EXTRAS_REQUIRE, EXTRAS_DEPS = build_extras('extras-*-requirements.txt')
DEPENDENCY_LINKS = list(set(itertools.chain(
INSTALL_DEPS,
TESTS_DEPS,
EXTRAS_DEPS
)))
setup(
name="flaubert",
version='0.0.1',
author="Eugene Scherba",
author_email="[email protected]",
description=("Tools for word2vec-based sentiment analysis"),
url='https://github.com/escherba/flaubert',
packages=find_packages(exclude=['tests']),
long_description=resource_string(__name__, 'README.rst'),
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
tests_require=TESTS_REQUIRE,
dependency_links=DEPENDENCY_LINKS,
test_suite='nose.collector',
classifiers=[
]
)
| mit |
moritzschaefer/the-search-engine | test/test_indexer.py | 1 | 3237 | import os
import math
import json
import pandas as pd
import numpy as np
from nose.tools import eq_
from nose import with_setup
import mock
from tsg.config import DATA_DIR, FIELD_WEIGHTS
from tsg.indexer.base import parse_term, hash_index,\
create_index, create_indexinfo
def test_parse_term():
TERM = 'aa'
term_file = 'test/files/{}.csv'.format(TERM)
N = 3
# These values are taken from aa.csv directly
w1count = (np.array([0, 1, 0, 3]) * FIELD_WEIGHTS).sum()
w2count = (np.array([0, 1, 0, 1]) * FIELD_WEIGHTS).sum()
qscores = pd.DataFrame({'qscore': [0.75, 2/3]},
index=['598859a0-eaa7-466a-8919-e6260c89edef',
'31a8e3b4-8c67-4fb7-b11a-1df1105617a2'])
qscores.index.name = 'uuid'
pagerank_scores = pd.DataFrame({'pagerank_score': [5, 1]},
index=['598859a0-eaa7-466a-8919-e6260c89edef',
'31a8e3b4-8c67-4fb7-b11a-1df1105617a2'])
pagerank_scores.index.name = 'uuid'
# scale pagerank_scores
scaled_pagerank_scores = np.log10(pagerank_scores)
scaled_pagerank_scores += 1 - np.min(scaled_pagerank_scores)#
termline = parse_term(term_file, N, qscores, pagerank_scores)
w1 = (1+math.log10(w1count)) * \
(1+math.log10(N/2)) * \
qscores.loc['598859a0-eaa7-466a-8919-e6260c89edef'].qscore * \
scaled_pagerank_scores.loc['598859a0-eaa7-466a-8919-e6260c89edef'].pagerank_score
w2 = (1+math.log10(w2count)) * \
(1+math.log10(N/2)) * \
qscores.loc['31a8e3b4-8c67-4fb7-b11a-1df1105617a2'].qscore * \
scaled_pagerank_scores.loc['31a8e3b4-8c67-4fb7-b11a-1df1105617a2'].pagerank_score
# overwrite weights because of numerical issue
#w2 = 0.18126459066485565
eq_(termline,
'598859a0-eaa7-466a-8919-e6260c89edef:{},'
'31a8e3b4-8c67-4fb7-b11a-1df1105617a2:{}'.format(w1, w2))
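# Illustrative helper (added for clarity; not used by the tests): the per-term
# document weight checked above is essentially a tf-idf score scaled by the
# document's quality score and its log-scaled PageRank.
def _expected_term_weight(weighted_count, df, n_docs, qscore, scaled_pagerank):
    # weighted_count: per-field counts already combined with FIELD_WEIGHTS
    tf = 1 + math.log10(weighted_count)
    idf = 1 + math.log10(n_docs / float(df))
    return tf * idf * qscore * scaled_pagerank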
TEST_DICT_PATH = DATA_DIR + 'testdict.dat'
TEST_INDEXINFO_PATH = DATA_DIR + 'testinfo.json'
def clean_testfiles():
for f in [TEST_DICT_PATH, TEST_INDEXINFO_PATH]:
try:
os.remove(TEST_DICT_PATH)
except FileNotFoundError:
pass
@with_setup(clean_testfiles, clean_testfiles)
@mock.patch('tsg.indexer.base.create_indexinfo')
def test_create_index(create_indexinfo_mock):
num_documents = 3
# TODO add some files to test/files/intermediate and check the dictionary
# later
create_index('test/files/intermediate/',
'test/files/parsed',
num_documents,
TEST_DICT_PATH,
TEST_INDEXINFO_PATH,
'test/files/qscores_a.csv',
'test/files/pagerank_a.csv')
assert os.path.isfile(TEST_INDEXINFO_PATH)
create_indexinfo_mock.assert_called_with(num_documents, TEST_INDEXINFO_PATH)
@with_setup(clean_testfiles, clean_testfiles)
def test_create_indexinfo():
num_documents = 3
create_indexinfo(3, TEST_INDEXINFO_PATH)
assert os.path.isfile(TEST_INDEXINFO_PATH)
with open(TEST_INDEXINFO_PATH) as f:
assert json.load(f) == {'num_documents': num_documents}
def test_hash_index():
hash_index()
# TODO
| mit |
pprett/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 66 | 5806 | import numpy as np
import scipy.sparse as sp
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics.cluster import silhouette_score
from sklearn.metrics.cluster import silhouette_samples
from sklearn.metrics import pairwise_distances
from sklearn.metrics.cluster import calinski_harabaz_score
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X_dense = dataset.data
X_csr = csr_matrix(X_dense)
X_dok = sp.dok_matrix(X_dense)
X_lil = sp.lil_matrix(X_dense)
y = dataset.target
for X in [X_dense, X_csr, X_dok, X_lil]:
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
score_precomputed = silhouette_score(D, y, metric='precomputed')
assert_greater(score_precomputed, 0)
# Test without calculating D
score_euclidean = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(score_precomputed, score_euclidean)
if X is X_dense:
score_dense_without_sampling = score_precomputed
else:
assert_almost_equal(score_euclidean,
score_dense_without_sampling)
# Test with sampling
score_precomputed = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
score_euclidean = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert_greater(score_precomputed, 0)
assert_greater(score_euclidean, 0)
assert_almost_equal(score_euclidean, score_precomputed)
if X is X_dense:
score_dense_with_sampling = score_precomputed
else:
assert_almost_equal(score_euclidean, score_dense_with_sampling)
def test_cluster_size_1():
# Assert Silhouette Coefficient == 0 when there is 1 sample in a cluster
# (cluster 0). We also test the case where there are identical samples
# as the only members of a cluster (cluster 2). To our knowledge, this case
# is not discussed in reference material, and we choose for it a sample
# score of 1.
X = [[0.], [1.], [1.], [2.], [3.], [3.]]
labels = np.array([0, 1, 1, 1, 2, 2])
# Cluster 0: 1 sample -> score of 0 by Rousseeuw's convention
# Cluster 1: intra-cluster = [.5, .5, 1]
# inter-cluster = [1, 1, 1]
# silhouette = [.5, .5, 0]
# Cluster 2: intra-cluster = [0, 0]
# inter-cluster = [arbitrary, arbitrary]
# silhouette = [1., 1.]
silhouette = silhouette_score(X, labels)
assert_false(np.isnan(silhouette))
ss = silhouette_samples(X, labels)
assert_array_equal(ss, [0, .5, .5, 0, 1, 1])
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
                        r'Number of labels is %d\. Valid values are 2 '
                        r'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
                        r'Number of labels is %d\. Valid values are 2 '
                        r'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
def test_non_encoded_labels():
dataset = datasets.load_iris()
X = dataset.data
labels = dataset.target
assert_equal(
silhouette_score(X, labels * 2 + 10), silhouette_score(X, labels))
assert_array_equal(
silhouette_samples(X, labels * 2 + 10), silhouette_samples(X, labels))
def test_non_numpy_labels():
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
assert_equal(
silhouette_score(list(X), list(y)), silhouette_score(X, y))
def test_calinski_harabaz_score():
rng = np.random.RandomState(seed=0)
# Assert message when there is only one label
assert_raise_message(ValueError, "Number of labels is",
calinski_harabaz_score,
rng.rand(10, 2), np.zeros(10))
# Assert message when all point are in different clusters
assert_raise_message(ValueError, "Number of labels is",
calinski_harabaz_score,
rng.rand(10, 2), np.arange(10))
# Assert the value is 1. when all samples are equals
assert_equal(1., calinski_harabaz_score(np.ones((10, 2)),
[0] * 5 + [1] * 5))
# Assert the value is 0. when all the mean cluster are equal
assert_equal(0., calinski_harabaz_score([[-1, -1], [1, 1]] * 10,
[0] * 10 + [1] * 10))
# General case (with non numpy arrays)
X = ([[0, 0], [1, 1]] * 5 + [[3, 3], [4, 4]] * 5 +
[[0, 4], [1, 3]] * 5 + [[3, 1], [4, 0]] * 5)
labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
assert_almost_equal(calinski_harabaz_score(X, labels),
45 * (40 - 4) / (5 * (4 - 1)))
| bsd-3-clause |
zorojean/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
    time_ = time() - t
    print("Birch %s as the final step took %0.2f seconds" % (info, time_))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
xuewei4d/scikit-learn | sklearn/utils/_testing.py | 7 | 33219 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# Thierry Guillemot
# License: BSD 3 clause
import os
import os.path as op
import inspect
import warnings
import sys
import functools
import tempfile
from subprocess import check_output, STDOUT, CalledProcessError
from subprocess import TimeoutExpired
import re
import contextlib
from collections.abc import Iterable
import scipy as sp
from functools import wraps
from inspect import signature
import shutil
import atexit
import unittest
from unittest import TestCase
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
from numpy.testing import assert_allclose
from numpy.testing import assert_almost_equal
from numpy.testing import assert_approx_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
import joblib
import sklearn
from sklearn.utils import IS_PYPY, _IS_32BIT
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import (
check_array,
check_is_fitted,
check_X_y,
)
__all__ = ["assert_raises",
"assert_raises_regexp",
"assert_array_equal",
"assert_almost_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_approx_equal", "assert_allclose",
"assert_run_python_script", "SkipTest"]
_dummy = TestCase('__init__')
assert_raises = _dummy.assertRaises
SkipTest = unittest.case.SkipTest
assert_dict_equal = _dummy.assertDictEqual
assert_raises_regex = _dummy.assertRaisesRegex
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'FutureWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
        raise AssertionError("%s did not give warning: %s (got %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str or callable
The message or a substring of the message to test for. If callable,
it takes a string as the argument and will trigger an AssertionError
if the callable returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'FutureWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
def check_in_message(msg): return message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
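def _demo_assert_warns_message():
    # Minimal usage sketch (hypothetical helper, not called anywhere): checks
    # that a toy function warns with a message containing "new API" and
    # returns its result unchanged.
    def _warns():
        warnings.warn("please use the new API instead", UserWarning)
        return 42
    return assert_warns_message(UserWarning, "new API", _warns)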
def assert_warns_div0(func, *args, **kw):
"""Assume that numpy's warning for divide by zero is raised.
Handles the case of platforms that do not support warning on divide by
zero.
Parameters
----------
func
*args
**kw
"""
with np.errstate(divide='warn', invalid='warn'):
try:
assert_warns(RuntimeWarning, np.divide, 1, np.zeros(1))
except AssertionError:
# This platform does not report numpy divide by zeros
return func(*args, **kw)
return assert_warns_message(RuntimeWarning,
'invalid value encountered',
func, *args, **kw)
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
"""
Parameters
----------
func
*args
**kw
"""
# very important to avoid uncontrolled state propagation
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'FutureWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: [%s]"
% (func.__name__,
', '.join(str(warning) for warning in w)))
return result
def ignore_warnings(obj=None, category=Warning):
"""Context manager and decorator to ignore warnings.
Note: Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging, this is not your tool of choice.
Parameters
----------
obj : callable, default=None
callable where you want to ignore the warnings.
category : warning class, default=Warning
The category to filter. If Warning, all categories will be muted.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if isinstance(obj, type) and issubclass(obj, Warning):
# Avoid common pitfall of passing category as the first positional
# argument which result in the test not being run
warning_name = obj.__name__
raise ValueError(
"'obj' should be a callable where you want to ignore warnings. "
"You passed a warning class instead: 'obj={warning_name}'. "
"If you want to pass a warning class to ignore_warnings, "
"you should use 'category={warning_name}'".format(
warning_name=warning_name))
elif callable(obj):
return _IgnoreWarnings(category=category)(obj)
else:
return _IgnoreWarnings(category=category)
class _IgnoreWarnings:
"""Improved and simplified Python warnings context manager and decorator.
This class allows the user to ignore the warnings raised by a function.
Copied from Python 2.7.5 and modified as required.
Parameters
----------
category : tuple of warning class, default=Warning
The category to filter. By default, all the categories will be muted.
"""
def __init__(self, category):
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
self.category = category
def __call__(self, fn):
"""Decorator to catch and hide warnings without visual nesting."""
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore", self.category)
return fn(*args, **kwargs)
return wrapper
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
warnings.simplefilter("ignore", self.category)
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test the message raised in an exception.
Given an exception, a callable to raise the exception, and
a message string, tests that the correct exception is raised and
that the message is a substring of the error thrown. Used to test
that the specific message thrown during an exception is correct.
Parameters
----------
exceptions : exception or tuple of exception
An Exception object.
message : str
The error message or a substring of the error message.
function : callable
Callable object to raise error.
*args : the positional arguments to `function`.
**kwargs : the keyword arguments to `function`.
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
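def _demo_assert_raise_message():
    # Minimal usage sketch (hypothetical helper, not called anywhere): checks
    # both the exception type and a substring of its message.
    def _boom():
        raise ValueError("bad input value")
    assert_raise_message(ValueError, "bad input", _boom)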
def assert_allclose_dense_sparse(x, y, rtol=1e-07, atol=1e-9, err_msg=''):
"""Assert allclose for sparse and dense data.
Both x and y need to be either sparse or dense, they
can't be mixed.
Parameters
----------
x : {array-like, sparse matrix}
First array to compare.
y : {array-like, sparse matrix}
Second array to compare.
rtol : float, default=1e-07
relative tolerance; see numpy.allclose.
atol : float, default=1e-9
absolute tolerance; see numpy.allclose. Note that the default here is
more tolerant than the default for numpy.testing.assert_allclose, where
atol=0.
err_msg : str, default=''
Error message to raise.
"""
if sp.sparse.issparse(x) and sp.sparse.issparse(y):
x = x.tocsr()
y = y.tocsr()
x.sum_duplicates()
y.sum_duplicates()
assert_array_equal(x.indices, y.indices, err_msg=err_msg)
assert_array_equal(x.indptr, y.indptr, err_msg=err_msg)
assert_allclose(x.data, y.data, rtol=rtol, atol=atol, err_msg=err_msg)
elif not sp.sparse.issparse(x) and not sp.sparse.issparse(y):
# both dense
assert_allclose(x, y, rtol=rtol, atol=atol, err_msg=err_msg)
else:
raise ValueError("Can only compare two sparse matrices,"
" not a sparse matrix and an array.")
def set_random_state(estimator, random_state=0):
"""Set random state of an estimator if it has the `random_state` param.
Parameters
----------
estimator : object
The estimator.
random_state : int, RandomState instance or None, default=0
Pseudo random number generator state.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
"""
if "random_state" in estimator.get_params():
estimator.set_params(random_state=random_state)
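def _demo_set_random_state():
    # Minimal usage sketch (hypothetical helper, not called anywhere): fixes
    # the seed of any estimator exposing a ``random_state`` parameter and is
    # a no-op otherwise.
    from sklearn.tree import DecisionTreeClassifier
    tree = DecisionTreeClassifier()
    set_random_state(tree, random_state=0)
    return tree.random_state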
try:
import pytest
skip_if_32bit = pytest.mark.skipif(_IS_32BIT,
reason='skipped on 32bit platforms')
skip_travis = pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
reason='skip on travis')
fails_if_pypy = pytest.mark.xfail(IS_PYPY,
reason='not compatible with PyPy')
skip_if_no_parallel = pytest.mark.skipif(not joblib.parallel.mp,
reason="joblib is in serial mode")
# Decorator for tests involving both BLAS calls and multiprocessing.
#
# Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction
# with some implementation of BLAS (or other libraries that manage an
# internal posix thread pool) can cause a crash or a freeze of the Python
# process.
#
# In practice all known packaged distributions (from Linux distros or
    # Anaconda) of BLAS under Linux seem to be safe. So this problem seems
    # to only impact OSX users.
#
# This wrapper makes it possible to skip tests that can possibly cause
# this crash under OS X with.
#
# Under Python 3.4+ it is possible to use the `forkserver` start method
# for multiprocessing to avoid this issue. However it can cause pickling
# errors on interactively defined functions. It therefore not enabled by
# default.
if_safe_multiprocessing_with_blas = pytest.mark.skipif(
sys.platform == 'darwin',
reason="Possible multi-process bug with some BLAS")
except ImportError:
pass
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independence).
"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap:
"""
Parameters
----------
data
mmap_mode : str, default='r'
"""
def __init__(self, data, mmap_mode='r'):
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
data_read_only, self.temp_folder = create_memmap_backed_data(
self.data, mmap_mode=self.mmap_mode, return_folder=True)
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
def create_memmap_backed_data(data, mmap_mode='r', return_folder=False):
"""
Parameters
----------
data
mmap_mode : str, default='r'
return_folder : bool, default=False
"""
temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
atexit.register(functools.partial(_delete_folder, temp_folder, warn=True))
filename = op.join(temp_folder, 'data.pkl')
joblib.dump(data, filename)
memmap_backed_data = joblib.load(filename, mmap_mode=mmap_mode)
result = (memmap_backed_data if not return_folder
else (memmap_backed_data, temp_folder))
return result
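def _demo_create_memmap_backed_data():
    # Minimal usage sketch (hypothetical helper, not called anywhere): the
    # returned object is a read-only memmapped copy of the input data.
    data = np.arange(10)
    memmapped = create_memmap_backed_data(data)
    assert_array_equal(memmapped, data)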
# Utils to test docstrings
def _get_args(function, varargs=False):
"""Helper to get function arguments."""
try:
params = signature(function).parameters
except ValueError:
# Error on builtin C function
return []
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
def _get_func_name(func):
"""Get function full name.
Parameters
----------
func : callable
The function object.
Returns
-------
name : str
The function name.
"""
parts = []
module = inspect.getmodule(func)
if module:
parts.append(module.__name__)
qualname = func.__qualname__
if qualname != func.__name__:
parts.append(qualname[:qualname.find('.')])
parts.append(func.__name__)
return '.'.join(parts)
def check_docstring_parameters(func, doc=None, ignore=None):
"""Helper to check docstring.
Parameters
----------
func : callable
The function object to test.
doc : str, default=None
Docstring if it is passed manually to the test.
ignore : list, default=None
Parameters to ignore.
Returns
-------
incorrect : list
A list of string describing the incorrect results.
"""
from numpydoc import docscrape
incorrect = []
ignore = [] if ignore is None else ignore
func_name = _get_func_name(func)
if (not func_name.startswith('sklearn.') or
func_name.startswith('sklearn.externals')):
return incorrect
# Don't check docstring for property-functions
if inspect.isdatadescriptor(func):
return incorrect
# Don't check docstring for setup / teardown pytest functions
if func_name.split('.')[-1] in ('setup_module', 'teardown_module'):
return incorrect
# Dont check estimator_checks module
if func_name.split('.')[2] == 'estimator_checks':
return incorrect
# Get the arguments from the function signature
param_signature = list(filter(lambda x: x not in ignore, _get_args(func)))
# drop self
if len(param_signature) > 0 and param_signature[0] == 'self':
param_signature.remove('self')
# Analyze function's docstring
if doc is None:
with warnings.catch_warnings(record=True) as w:
try:
doc = docscrape.FunctionDoc(func)
except Exception as exp:
incorrect += [func_name + ' parsing error: ' + str(exp)]
return incorrect
if len(w):
raise RuntimeError('Error for %s:\n%s' % (func_name, w[0]))
param_docs = []
for name, type_definition, param_doc in doc['Parameters']:
# Type hints are empty only if parameter name ended with :
if not type_definition.strip():
if ':' in name and name[:name.index(':')][-1:].strip():
incorrect += [func_name +
' There was no space between the param name and '
'colon (%r)' % name]
elif name.rstrip().endswith(':'):
incorrect += [func_name +
' Parameter %r has an empty type spec. '
'Remove the colon' % (name.lstrip())]
# Create a list of parameters to compare with the parameters gotten
# from the func signature
if '*' not in name:
param_docs.append(name.split(':')[0].strip('` '))
# If one of the docstring's parameters had an error then return that
# incorrect message
if len(incorrect) > 0:
return incorrect
# Remove the parameters that should be ignored from list
param_docs = list(filter(lambda x: x not in ignore, param_docs))
# The following is derived from pytest, Copyright (c) 2004-2017 Holger
# Krekel and others, Licensed under MIT License. See
# https://github.com/pytest-dev/pytest
message = []
for i in range(min(len(param_docs), len(param_signature))):
if param_signature[i] != param_docs[i]:
message += ["There's a parameter name mismatch in function"
" docstring w.r.t. function signature, at index %s"
" diff: %r != %r" %
(i, param_signature[i], param_docs[i])]
break
if len(param_signature) > len(param_docs):
message += ["Parameters in function docstring have less items w.r.t."
" function signature, first missing item: %s" %
param_signature[len(param_docs)]]
elif len(param_signature) < len(param_docs):
message += ["Parameters in function docstring have more items w.r.t."
" function signature, first extra item: %s" %
param_docs[len(param_signature)]]
# If there wasn't any difference in the parameters themselves between
# docstring and signature including having the same length then return
# empty list
if len(message) == 0:
return []
import difflib
import pprint
param_docs_formatted = pprint.pformat(param_docs).splitlines()
param_signature_formatted = pprint.pformat(param_signature).splitlines()
message += ["Full diff:"]
message.extend(
line.strip() for line in difflib.ndiff(param_signature_formatted,
param_docs_formatted)
)
incorrect.extend(message)
# Prepend function name
incorrect = ['In function: ' + func_name] + incorrect
return incorrect
def assert_run_python_script(source_code, timeout=60):
"""Utility to check assertions in an independent Python subprocess.
The script provided in the source code should return 0 and not print
anything on stderr or stdout.
This is a port from cloudpickle https://github.com/cloudpipe/cloudpickle
Parameters
----------
source_code : str
The Python source code to execute.
timeout : int, default=60
Time in seconds before timeout.
"""
fd, source_file = tempfile.mkstemp(suffix='_src_test_sklearn.py')
os.close(fd)
try:
with open(source_file, 'wb') as f:
f.write(source_code.encode('utf-8'))
cmd = [sys.executable, source_file]
cwd = op.normpath(op.join(op.dirname(sklearn.__file__), '..'))
env = os.environ.copy()
try:
env["PYTHONPATH"] = os.pathsep.join([cwd, env["PYTHONPATH"]])
except KeyError:
env["PYTHONPATH"] = cwd
kwargs = {
'cwd': cwd,
'stderr': STDOUT,
'env': env
}
# If coverage is running, pass the config file to the subprocess
coverage_rc = os.environ.get("COVERAGE_PROCESS_START")
if coverage_rc:
kwargs['env']['COVERAGE_PROCESS_START'] = coverage_rc
kwargs['timeout'] = timeout
try:
try:
out = check_output(cmd, **kwargs)
except CalledProcessError as e:
raise RuntimeError(u"script errored with output:\n%s"
% e.output.decode('utf-8'))
if out != b"":
raise AssertionError(out.decode('utf-8'))
except TimeoutExpired as e:
raise RuntimeError(u"script timeout, output so far:\n%s"
% e.output.decode('utf-8'))
finally:
os.unlink(source_file)
def _convert_container(container, constructor_name, columns_name=None):
if constructor_name == 'list':
return list(container)
elif constructor_name == 'tuple':
return tuple(container)
elif constructor_name == 'array':
return np.asarray(container)
elif constructor_name == 'sparse':
return sp.sparse.csr_matrix(container)
elif constructor_name == 'dataframe':
pd = pytest.importorskip('pandas')
return pd.DataFrame(container, columns=columns_name)
elif constructor_name == 'series':
pd = pytest.importorskip('pandas')
return pd.Series(container)
elif constructor_name == 'index':
pd = pytest.importorskip('pandas')
return pd.Index(container)
elif constructor_name == 'slice':
return slice(container[0], container[1])
elif constructor_name == 'sparse_csr':
return sp.sparse.csr_matrix(container)
elif constructor_name == 'sparse_csc':
return sp.sparse.csc_matrix(container)
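def _demo_convert_container():
    # Minimal usage sketch (hypothetical helper, not called anywhere):
    # round-trip a plain list through two of the supported constructors.
    as_array = _convert_container([1, 2, 3], 'array')
    as_tuple = _convert_container([1, 2, 3], 'tuple')
    return as_array, as_tuple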
def raises(expected_exc_type, match=None, may_pass=False, err_msg=None):
"""Context manager to ensure exceptions are raised within a code block.
This is similar to and inspired from pytest.raises, but supports a few
other cases.
This is only intended to be used in estimator_checks.py where we don't
want to use pytest. In the rest of the code base, just use pytest.raises
instead.
Parameters
----------
    expected_exc_type : Exception or list of Exception
The exception that should be raised by the block. If a list, the block
should raise one of the exceptions.
match : str or list of str, default=None
A regex that the exception message should match. If a list, one of
the entries must match. If None, match isn't enforced.
may_pass : bool, default=False
If True, the block is allowed to not raise an exception. Useful in
cases where some estimators may support a feature but others must
fail with an appropriate error message. By default, the context
manager will raise an exception if the block does not raise an
exception.
err_msg : str, default=None
If the context manager fails (e.g. the block fails to raise the
proper exception, or fails to match), then an AssertionError is
raised with this message. By default, an AssertionError is raised
with a default error message (depends on the kind of failure). Use
this to indicate how users should fix their estimators to pass the
checks.
Attributes
----------
raised_and_matched : bool
True if an exception was raised and a match was found, False otherwise.
"""
return _Raises(expected_exc_type, match, may_pass, err_msg)
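def _demo_raises():
    # Minimal usage sketch (hypothetical helper, not called anywhere): mirrors
    # the pytest.raises call pattern with a toy ValueError.
    with raises(ValueError, match="toy"):
        raise ValueError("toy error")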
class _Raises(contextlib.AbstractContextManager):
# see raises() for parameters
def __init__(self, expected_exc_type, match, may_pass, err_msg):
self.expected_exc_types = (
expected_exc_type
if isinstance(expected_exc_type, Iterable)
else [expected_exc_type]
)
self.matches = [match] if isinstance(match, str) else match
self.may_pass = may_pass
self.err_msg = err_msg
self.raised_and_matched = False
def __exit__(self, exc_type, exc_value, _):
# see
# https://docs.python.org/2.5/whatsnew/pep-343.html#SECTION000910000000000000000
if exc_type is None: # No exception was raised in the block
if self.may_pass:
return True # CM is happy
else:
err_msg = (
self.err_msg or f"Did not raise: {self.expected_exc_types}"
)
raise AssertionError(err_msg)
if not any(
issubclass(exc_type, expected_type)
for expected_type in self.expected_exc_types
):
if self.err_msg is not None:
raise AssertionError(self.err_msg) from exc_value
else:
return False # will re-raise the original exception
if self.matches is not None:
err_msg = self.err_msg or (
"The error message should contain one of the following "
"patterns:\n{}\nGot {}".format(
"\n".join(self.matches), str(exc_value)
)
)
if not any(re.search(match, str(exc_value))
for match in self.matches):
raise AssertionError(err_msg) from exc_value
self.raised_and_matched = True
return True
class MinimalClassifier:
"""Minimal classifier implementation with inheriting from BaseEstimator.
This estimator should be tested with:
* `check_estimator` in `test_estimator_checks.py`;
* within a `Pipeline` in `test_pipeline.py`;
* within a `SearchCV` in `test_search.py`.
"""
_estimator_type = "classifier"
def __init__(self, param=None):
self.param = param
def get_params(self, deep=True):
return {"param": self.param}
def set_params(self, **params):
for key, value in params.items():
setattr(self, key, value)
return self
def fit(self, X, y):
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, counts = np.unique(y, return_counts=True)
self._most_frequent_class_idx = counts.argmax()
return self
def predict_proba(self, X):
check_is_fitted(self)
X = check_array(X)
proba_shape = (X.shape[0], self.classes_.size)
y_proba = np.zeros(shape=proba_shape, dtype=np.float64)
y_proba[:, self._most_frequent_class_idx] = 1.0
return y_proba
def predict(self, X):
y_proba = self.predict_proba(X)
y_pred = y_proba.argmax(axis=1)
return self.classes_[y_pred]
def score(self, X, y):
from sklearn.metrics import accuracy_score
return accuracy_score(y, self.predict(X))
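def _demo_minimal_classifier():
    # Minimal usage sketch (hypothetical helper, not called anywhere): the
    # minimal classifier always predicts the most frequent training class.
    X = np.array([[0.], [1.], [2.], [3.]])
    y = np.array([0, 1, 1, 1])
    clf = MinimalClassifier().fit(X, y)
    return clf.predict(X)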
class MinimalRegressor:
"""Minimal regressor implementation with inheriting from BaseEstimator.
This estimator should be tested with:
* `check_estimator` in `test_estimator_checks.py`;
* within a `Pipeline` in `test_pipeline.py`;
* within a `SearchCV` in `test_search.py`.
"""
_estimator_type = "regressor"
def __init__(self, param=None):
self.param = param
def get_params(self, deep=True):
return {"param": self.param}
def set_params(self, **params):
for key, value in params.items():
setattr(self, key, value)
return self
def fit(self, X, y):
X, y = check_X_y(X, y)
self.is_fitted_ = True
self._mean = np.mean(y)
return self
def predict(self, X):
check_is_fitted(self)
X = check_array(X)
return np.ones(shape=(X.shape[0],)) * self._mean
def score(self, X, y):
from sklearn.metrics import r2_score
return r2_score(y, self.predict(X))
class MinimalTransformer:
"""Minimal transformer implementation with inheriting from
BaseEstimator.
This estimator should be tested with:
* `check_estimator` in `test_estimator_checks.py`;
* within a `Pipeline` in `test_pipeline.py`;
* within a `SearchCV` in `test_search.py`.
"""
def __init__(self, param=None):
self.param = param
def get_params(self, deep=True):
return {"param": self.param}
def set_params(self, **params):
for key, value in params.items():
setattr(self, key, value)
return self
def fit(self, X, y=None):
X = check_array(X)
self.is_fitted_ = True
return self
def transform(self, X, y=None):
check_is_fitted(self)
X = check_array(X)
return X
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X, y)
| bsd-3-clause |
PNProductions/py-seam-carving | tests/graphcut_design.py | 1 | 1641 | import numpy as np
from seamcarving.video_reduce import video_seam_carving_decomposition
import networkx as nx
import matplotlib.pyplot as plt
def plot_graph_3d(graph, nodes_shape, plot_terminal=True, plot_weights=True, font_size=7):
w_h = nodes_shape[1] * nodes_shape[2]
X, Y = np.mgrid[:nodes_shape[1], :nodes_shape[2]]
aux = np.array([Y.ravel(), X[::-1].ravel()]).T
positions = {i: aux[i] for i in xrange(w_h)}
for i in xrange(1, nodes_shape[0]):
for j in xrange(w_h):
positions[w_h * i + j] = [positions[j][0] + 0.3 * i, positions[j][1] + 0.2 * i]
positions['s'] = np.array([-1, nodes_shape[1] / 2.0 - 0.5])
positions['t'] = np.array([nodes_shape[2] + 0.2 * nodes_shape[0], nodes_shape[1] / 2.0 - 0.5])
nxg = graph.get_nx_graph()
if not plot_terminal:
nxg.remove_nodes_from(['s', 't'])
nx.draw(nxg, pos=positions)
nx.draw_networkx_labels(nxg, pos=positions)
if plot_weights:
edge_labels = dict([((u, v,), d['weight'])
for u, v, d in nxg.edges(data=True)])
nx.draw_networkx_edge_labels(nxg,
pos=positions,
edge_labels=edge_labels,
label_pos=0.3,
font_size=font_size)
plt.axis('equal')
plt.show()
# Image = (np.ones((2, 5, 5)) * 3).astype(np.uint64) # np.arange(50).reshape(2, 5, 5)
Image = np.arange(72).reshape(2, 6, 6)
subject = video_seam_carving_decomposition(Image, 0, 0, False)
g, nodeids = subject.generate_graph(Image)
plot_graph_3d(g, nodeids.shape)
| mit |
sankalpg/Essentia_tonicDebug_TEMP | src/examples/python/experimental/beatogram.py | 10 | 26647 | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
import os, sys
from os.path import join
import essentia
from essentia.streaming import *
import essentia.standard as std
from pylab import median, mean, argmax
import matplotlib
#matplotlib.use('Agg') # in order to not grab focus on screen while batch processing
import matplotlib.pyplot as pyplot
import numpy as np
from numpy import shape, zeros, fabs
# for key input
import termios, sys, os, subprocess
TERMIOS = termios
import copy
# for alsa
if sys.platform =='linux2':
import wave, alsaaudio
import time
import thread
barkBands = [0.0, 50.0, 100.0, 150.0, 200.0, 300.0, 400.0, 510.0, 630.0, 770.0,
920.0, 1080.0, 1270.0, 1480.0, 1720.0, 2000.0, 2320.0, 2700.0,
3150.0, 3700.0, 4400.0, 5300.0, 6400.0, 7700.0,
9500.0, 12000.0, 15500.0, 20500.0, 27000.0]
scheirerBands = [ 0.0, 200.0, 400.0, 800.0, 1600.0, 3200.0, 22000.0]
scheirerBands_extended = [ 0.0, 50.0, 100.0, 150.0, 200.0, 400.0, 800.0, 1600.0, 3200.0, 5000.0, 10000.0]
EqBands = [20.0, 150.0, 400.0, 3200.0, 7000.0, 22000.0]
EqBands2 =[0.0, 75.0, 150.0, 400.0, 3200.0, 7000.0]
DOWNMIX ='mix'
# defines for novelty curve:
FRAMESIZE = 1024
HOPSIZE = FRAMESIZE/2
WEIGHT='flat' #'supplied' #'flat'
SAMPLERATE=44100.0
WINDOW='hann' #'blackmanharris92'
BEATWINDOW=16 # number of beats where to compute statistics
# tempogram defines:
FRAMERATE = float(SAMPLERATE)/HOPSIZE
TEMPO_FRAMESIZE = 4;
TEMPO_OVERLAP=2;
STARTTIME = 0
ENDTIME = 2000
def computeOnsets(filename, pool):
loader = EasyLoader(filename=filename,
sampleRate=pool['samplerate'],
startTime=STARTTIME, endTime=ENDTIME,
downmix=pool['downmix'])
onset = OnsetRate()
loader.audio >> onset.signal
onset.onsetTimes >> (pool, 'ticks')
onset.onsetRate >> None
essentia.run(loader)
pool.set('size', loader.audio.totalProduced())
pool.set('length', pool['size']/pool['samplerate'])
def computeSegmentation(filename, pool):
sampleRate = 44100
frameSize = 2048
hopSize = frameSize/2
audio = EqloudLoader(filename = filename,
downmix=pool['downmix'],
sampleRate=sampleRate)
fc = FrameCutter(frameSize=frameSize, hopSize=hopSize, silentFrames='keep')
w = Windowing(type='blackmanharris62')
spec = Spectrum()
mfcc = MFCC(highFrequencyBound=8000)
tmpPool = essentia.Pool()
audio.audio >> fc.signal
fc.frame >> w.frame >> spec.frame
spec.spectrum >> mfcc.spectrum
mfcc.bands >> (tmpPool, 'mfcc_bands')
mfcc.mfcc>> (tmpPool, 'mfcc_coeff')
essentia.run(audio)
# compute transpose of features array, don't call numpy.matrix.transpose
# because essentia fucks it up!!
features = copy.deepcopy(tmpPool['mfcc_coeff'].transpose())
segments = std.SBic(cpw=1.5, size1=1000, inc1=300, size2=600, inc2=50)(features)
for segment in segments:
pool.add('segments', segment*hopSize/sampleRate)
#print pool['segments']
def computeNoveltyCurve(filename, pool):
loader = EasyLoader(filename=filename,
sampleRate=pool['samplerate'],
startTime=STARTTIME, endTime=ENDTIME,
downmix=pool['downmix'])
fc = FrameCutter(frameSize=int(pool['framesize']),
silentFrames ='noise',
hopSize=int(pool['hopsize']),
startFromZero=False)
window = Windowing(type=pool['window'],
zeroPhase=False)
#freqBands = FrequencyBands(frequencyBands=EqBands, sampleRate=pool['samplerate'])
freqBands = FrequencyBands(sampleRate=pool['samplerate'])
spec = Spectrum()
hfc = HFC()
loader.audio >> fc.signal
fc.frame >> window.frame >> spec.frame
spec.spectrum >> freqBands.spectrum
spec.spectrum >> hfc.spectrum
freqBands.bands >> (pool, 'frequency_bands')
hfc.hfc >> (pool, 'hfc')
essentia.run(loader)
pool.set('size', loader.audio.totalProduced())
pool.set('length', pool['size']/pool['samplerate'])
# compute a weighting curve that is according to frequency bands:
frequencyBands = pool['frequency_bands']
nFrames = len(frequencyBands)
weightCurve= np.sum(frequencyBands, axis=0)
weightCurve = [val/float(nFrames) for val in weightCurve]
weightCurve = essentia.normalize(weightCurve)
#pyplot.plot(weightCurve)
#pyplot.show()
noveltyCurve = std.NoveltyCurve(frameRate=pool['framerate'],
weightCurveType=pool['weight'],
weightCurve=weightCurve)(frequencyBands)
#for x in noveltyCurve: pool.add('novelty_curve', x)
#return
# derivative of hfc seems to help in finding more precise beats...
hfc = essentia.normalize(pool['hfc'])
dhfc = essentia.derivative(hfc)
for i, val in enumerate(dhfc):
if val< 0: continue
noveltyCurve[i] += val
# low pass filter novelty curve:
env = std.Envelope(attackTime=2./pool['framerate'],
releaseTime=2./pool['framerate'])(noveltyCurve)
# apply median filter:
windowSize = 8 #samples
size = len(env)
filtered = zeros(size)
for i in range(size):
start = i-windowSize
if start < 0: start = 0
end = start + windowSize
if end > size:
end = size
start = size-windowSize
filtered[i] = env[i] - np.median(env[start:end])
if filtered[i] < 0: filtered[i] = 0
#pyplot.subplot(311)
#pyplot.plot(noveltyCurve)
#pyplot.subplot(312)
#pyplot.plot(env, 'r')
#pyplot.subplot(313)
#pyplot.plot(filtered, 'g')
#pyplot.show()
#for x in noveltyCurve: pool.add('novelty_curve', x)
for x in filtered: pool.add('novelty_curve', x)
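def _demoMedianFilterSketch():
    # Minimal sketch (hypothetical helper, not called by the script): the
    # novelty curve above is flattened by subtracting a local median over a
    # small window and clipping negative values to zero, roughly as below.
    env = fabs(np.random.randn(32))
    windowSize = 8
    filtered = zeros(len(env))
    for i in range(len(env)):
        start = max(0, i - windowSize)
        end = min(len(env), start + windowSize)
        filtered[i] = max(0.0, env[i] - np.median(env[start:end]))
    return filtered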
def computeBeats(filename, pool):
computeNoveltyCurve(filename, pool)
recompute = True
novelty = pool['novelty_curve']
count = 0
bpmTolerance = 5
while recompute:
gen = VectorInput(novelty)
bpmHist = BpmHistogram(frameRate=pool['framerate'],
frameSize=pool['tempo_framesize'],
overlap=int(pool['tempo_overlap']),
maxPeaks=50,
windowType='hann',
minBpm=40.0,
maxBpm=1000.0,
normalize=False,
constantTempo=False,
tempoChange=5,
weightByMagnitude=True)
gen.data >> bpmHist.novelty
bpmHist.bpm >> (pool, 'peaksBpm')
bpmHist.bpmMagnitude >> (pool, 'peaksMagnitude')
bpmHist.harmonicBpm >> (pool, 'harmonicBpm')
bpmHist.harmonicBpm >> (pool, 'harmonicBpm')
bpmHist.confidence >> (pool, 'confidence')
bpmHist.ticks >> (pool, 'ticks')
bpmHist.ticksMagnitude >> (pool, 'ticksMagnitude')
bpmHist.sinusoid >> (pool, 'sinusoid')
essentia.run(gen)
## get rid of beats of beats > audio.length
#ticks = []
#ticksAmp = []
#for t, amp in zip(pool['ticks'], pool['ticksMagnitude']):
# if t < 0 or t > pool['length']: continue
# ticks.append(float(t))
# ticksAmp.append(float(amp))
#step = pool['step']
#ticks = essentia.postProcessTicks(ticks, ticksAmp, 60./pool['harmonicBpm'][0]);
sine = pool['sinusoid']
#pyplot.plot(novelty, 'k')
#pyplot.plot(sine, 'r')
#for i in range(len(novelty)-1):
# diff = novelty[i+1]-novelty[i]
# if diff > 0: novelty[i] = diff
# else: novelty[i] = 0
#pyplot.plot(novelty, 'r')
prodPulse = zeros(len(novelty))
i = 0
while i < len(novelty):
if sine[i] <= 0.1:
i += 1
continue
window = []
while sine[i] != 0 and i < len(novelty):
window.append(novelty[i]*sine[i])
i+=1
peakPos = argmax(window)
peakPos = i - len(window) + peakPos
prodPulse[peakPos] = novelty[peakPos]
#pyplot.plot(prodPulse, 'g')
#pyplot.show()
ticks = []
ticksAmp = []
frameRate = pool['framerate']
bpms = pool['harmonicBpm']
print 'estimated bpm:', bpms
tatum = 60./bpms[0]
diffTick = 2*tatum
prevTick = -1
prevAmp = -1
for i, x in enumerate(prodPulse):
if x != 0:
newTick = float(i)/frameRate
if newTick < 0 or newTick > pool['length']: continue
ticks.append(newTick)
ticksAmp.append(x)
# if x != 0:
# newTick = float(i)/frameRate
# if prevTick < 0:
# ticks.append(newTick)
# ticksAmp.append(x)
# prevTick = newTick
# prevAmp = x
# else:
# diff = newTick-prevTick
# ratio = max( round(tatum/diff), round(diff/tatum))
# if (diff >= 0.9*tatum*ratio) and (diff <= 1.1*tatum*ratio):
# ticks.append(newTick)
# ticksAmp.append(x)
# prevTick = newTick
# prevAmp = x
# else: #(newTick-prevTick) < 0.75*tatum:
# newTick = (newTick*x+prevTick*prevAmp)/(x+prevAmp)
# ticks[-1] = newTick
# ticksAmp[-1] = (x+prevAmp)/2.
# prevTick = newTick
# prevAmp = (x+prevAmp)/2.
_, _, bestBpm= getMostStableTickLength(ticks)
#pool.set('bestTicksStart', bestTicks[0])
#pool.set('bestTicksEnd', bestTicks[0] + bestTicks[1])
#ticks = essentia.postProcessTicks(ticks, ticksAmp, 60./pool['harmonicBpm'][0]);
#ticks = essentia.postProcessTicks(ticks)
if fabs(bestBpm - bpms[0]) < bpmTolerance: recompute = False
else:
count+=1
if count >= 5:
bpmTolerance += 1
count = 0
print "recomputing!!!!"
novelty = copy.deepcopy(pool['sinusoid'])
pool.remove('sinusoid')
pool.remove('novelty_curve')
pool.remove('peaksBpm')
pool.remove('peaksMagnitude')
pool.remove('harmonicBpm')
pool.remove('harmonicBpm')
pool.remove('confidence')
pool.remove('ticks')
pool.remove('ticksMagnitude')
#pyplot.plot(prodPulse, 'g')
#pyplot.show()
print 'estimated bpm:', bpms
print 'bpms:', pool['peaksBpm']
#ticks = postProcessTicks(filename, pool)
#print 'bpm mags:', pool['peaksMagnitude']
bpmRatios = []
#for i, bpm1 in enumerate(bpms):
# bpmRatios.append([float(bpm1)/float(bpm2) for bpm2 in bpms[i:]])
#print 'bpmRatios:', bpmRatios
#print 'original nticks:', len(ticks)
#print 'step:', step
if step>1:
ticks = essentia.array(map(lambda i: ticks[i],
filter(lambda i: i%step == 0,range(len(ticks)))))
#print 'nticks:', len(ticks)
pool.remove('ticks')
pool.set('ticks', ticks)
def longestChain(dticks, startpos, period, tolerance):
pos = startpos
ubound = period*(1+tolerance)
lbound = period*(1-tolerance)
while (pos < len(dticks)) and\
(lbound < dticks[pos] and dticks[pos] < ubound):
pos += 1
return pos - startpos
def getMostStableTickLength(ticks):
nticks = len(ticks)
dticks = zeros(nticks-1)
for i in range(nticks-1):
dticks[i] = (ticks[i+1] - ticks[i])
    hist, distx = np.histogram(dticks, bins=int(50*(1+(max(dticks)-min(dticks)))))
bestPeriod = distx[argmax(hist)] # there may be more than one candidate!!
bestBpm = 60./bestPeriod
print 'best period', bestPeriod
print 'best bpm:', bestBpm
#print 'hist:', hist, distx
maxLength = 0
idx = 0
for startpos in range(nticks-1):
l = longestChain(dticks, startpos, bestPeriod, 0.1)
if l > maxLength :
maxLength = l;
idx = startpos;
print 'max stable length:', idx, maxLength
return idx, maxLength, bestBpm
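def _demoLongestChain():
    # Minimal sketch (hypothetical helper, not called by the script): with a
    # nominal period of 0.5s and 10% tolerance, the chain starting at index 0
    # spans the first three inter-tick intervals.
    dticks = [0.5, 0.51, 0.49, 0.9, 0.5]
    return longestChain(dticks, 0, 0.5, 0.1)  # -> 3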
def postProcessTicks(audioFilename, pool):
'''Computes delta energy in order to find the correct position of the ticks'''
# get rid of beats of beats > audio.length
ticks = []
ticksAmp = []
for t, amp in zip(pool['ticks'], pool['ticksMagnitude']):
if t < 0 or t > pool['length']: continue
ticks.append(float(t))
ticksAmp.append(float(amp))
step = pool['step']
#ticks = essentia.postProcessTicks(ticks, ticksAmp, 60./pool['harmonicBpm'][0]);
#beatWindowDuration = 0.01 # seconds
#beatDuration = 0.005 # seconds
#rmsFrameSize = 64
#rmsHopSize = rmsFrameSize/2
#audio = std.MonoLoader(filename=audioFilename,
# sampleRate=pool['samplerate'],
# downmix=pool['downmix'])()
#for i, tick in enumerate(ticks):
# startTime = tick - beatWindowDuration/2.0
# if startTime < 0: startTime = 0
# endTime = startTime + beatWindowDuration + beatDuration + 0.0001
# slice = std.Trimmer(sampleRate=pool['samplerate'],
# startTime=startTime,
# endTime=endTime)(audio)
# frames = std.FrameGenerator(slice, frameSize=rmsFrameSize, hopSize=rmsHopSize)
# maxDeltaRms=0
# RMS = std.RMS()
# prevRms = 0
# pos = 0
# tickPos = pos
# for frame in frames:
# rms = RMS(frame)
# diff = rms - prevRms
# if diff > maxDeltaRms:
# tickPos = pos
# maxDeltaRms = diff
# pos+=1
# prevRms = rms
# ticks[i]= tick + tickPos*float(rmsHopSize)/pool['samplerate']
return ticks
def writeBeatFile(filename, pool) :
beatFilename = os.path.splitext(filename)[0] + '_beat.wav' #'out_beat.wav' #
audio = EasyLoader(filename=filename, downmix='mix', startTime=STARTTIME, endTime=ENDTIME)
writer = MonoWriter(filename=beatFilename)
onsetsMarker = AudioOnsetsMarker(onsets=pool['ticks'])
audio.audio >> onsetsMarker.signal >> writer.audio
essentia.run(audio)
return beatFilename
def computeBeatsLoudness(filename, pool):
loader = MonoLoader(filename=filename,
sampleRate=pool['samplerate'],
downmix=pool['downmix'])
ticks = pool['ticks']#[pool['bestTicksStart']:pool['bestTicksStart']+32]
beatsLoud = BeatsLoudness(sampleRate = pool['samplerate'],
frequencyBands = barkBands, #EqBands, #scheirerBands, #barkBands,
beats=ticks)
loader.audio >> beatsLoud.signal
beatsLoud.loudness >> (pool, 'loudness')
beatsLoud.loudnessBandRatio >> (pool, 'loudnessBandRatio')
essentia.run(loader)
def computeSpectrum(signal):
#gen = VectorInput(signal)
#fc = FrameCutter(startFromZero=False, frameSize=48, hopSize=1)
#w = Windowing(zeroPhase=False)
#spec = Spectrum()
#p = essentia.Pool()
#gen.data >> fc.signal
#fc.frame >> w.frame >> spec.frame
#spec.spectrum >> (p,'spectrum')
#essentia.run(gen)
#pyplot.imshow(p['spectrum'], cmap=pyplot.cm.hot, aspect='auto', origin='lower')
corr = std.AutoCorrelation()(signal)
pyplot.plot(corr)
pyplot.show()
print argmax(corr[2:])+2
def isPowerTwo(n):
return (n&(n-1))==0
def isEvenHarmonic(a,b):
if a < 2 or b < 2: return False
if (a<b): return isEvenHarmonic(b,a)
return (a%b == 0) and isPowerTwo(a/b)
def getHarmonics(array):
size = len(array)
hist = [0]*size
counts = [1]*size
for idx1, x in enumerate(array):
for idx2, y in enumerate(array):
if isEvenHarmonic(idx1, idx2):
hist[idx1] += y
counts[idx1] += 1
hist = [hist[i]/float(counts[i]) for i in range(size)]
return hist
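def _demoEvenHarmonics():
    # Minimal sketch (hypothetical helper, not called by the script): 8 is an
    # even harmonic of 2 because 8/2 == 4 is a power of two, while 6 is not
    # because 6/2 == 3 is not a power of two (integer division, Python 2).
    return isEvenHarmonic(8, 2), isEvenHarmonic(6, 2)  # -> (True, False)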
def plot(pool, title, outputfile='out.svg', subplot=111):
''' plots bars for each beat'''
#computeSpectrum(pool['loudness'])
ticks = pool['ticks']
#barSize = min([ticks[i+1] - ticks[i] for i in range(len(ticks[:-1]))])/2.
barSize = 0.8
offset = barSize/2.
loudness = pool['loudness']
loudnessBand = pool['loudnessBandRatio'] # ticks x bands
medianRatiosPerTick = []
meanRatiosPerTick = []
for tick, energy in enumerate(loudnessBand):
medianRatiosPerTick.append(median(energy))
meanRatiosPerTick.append(mean(energy))
loudnessBand = copy.deepcopy(loudnessBand.transpose()) # bands x ticks
#xcorr = std.CrossCorrelation(minLag=0, maxLag=16)
#acorr = std.AutoCorrelation()
#bandCorr = []
#for iBand, band in enumerate(loudnessBand):
# bandCorr.append(acorr(essentia.array(band)))
nBands = len(loudnessBand)
nticks = len(loudness)
maxRatiosPerBand = []
medianRatiosPerBand = []
meanRatiosPerBand = []
for idxBand, band in enumerate(loudnessBand):
maxRatiosPerBand.append([0]*nticks)
medianRatiosPerBand.append([0]*nticks)
meanRatiosPerBand.append([0]*nticks)
for idxTick in range(nticks):
start = idxTick
end = start+BEATWINDOW
if (end>nticks):
howmuch = end-nticks
end = nticks-1
start = end-howmuch
if start < 0: start = 0
medianRatiosPerBand[idxBand][idxTick] = median(band[start:end])
maxRatiosPerBand[idxBand][idxTick] = max(band[start:end])
meanRatiosPerBand[idxBand][idxTick] = mean(band[start:end])
for iBand, band in enumerate(loudnessBand):
for tick, ratio in enumerate(band):
#if ratio < medianRatiosPerBand[iBand][tick] and\
# ratio <= medianRatiosPerTick[tick]: loudnessBand[iBand][tick]=0
bandThreshold = max(medianRatiosPerBand[iBand][tick],
meanRatiosPerBand[iBand][tick])
tickThreshold = max(medianRatiosPerTick[tick],
meanRatiosPerTick[tick])
if ratio < bandThreshold and ratio <= tickThreshold:
loudnessBand[iBand][tick]=0
else:
loudnessBand[iBand][tick] *= loudness[tick]
#if loudnessBand[iBand][tick] > 1 : loudnessBand[iBand][tick] = 1
acorr = std.AutoCorrelation()
bandCorr = []
maxCorr = []
for iBand, band in enumerate(loudnessBand):
bandCorr.append(acorr(essentia.array(band)))
maxCorr.append(argmax(bandCorr[-1][2:])+2)
# use as much window space as possible:
pyplot.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)
pyplot.subplot(511)
pyplot.imshow(bandCorr, cmap=pyplot.cm.hot, aspect='auto', origin='lower', interpolation='nearest')
print 'max correlation', maxCorr
sumCorr = []
for tick in range(nticks):
total = 0
for band in bandCorr:
total += band[tick]
sumCorr.append(total)
sumCorr[0] = 0
sumCorr[1] = 0
pyplot.subplot(512)
maxAlpha = max(sumCorr)
for i,val in enumerate(sumCorr):
alpha = max(0,min(val/maxAlpha, 1))
pyplot.bar(i, 1 , barSize, align='edge',
bottom=0,alpha=alpha,
color='r', edgecolor='w', linewidth=.3)
print 'max sum correlation', argmax(sumCorr[2:])+2
hist = getHarmonics(sumCorr)
maxHist = argmax(hist)
print 'max histogram', maxHist
#for idx,val in enumerate(hist):
# if val < maxHist: hist[idx] = 0
pyplot.subplot(513)
for i,val in enumerate(hist):
pyplot.bar(i, val , barSize, align='edge',
bottom=0, color='r', edgecolor='w', linewidth=.3)
peakDetect = std.PeakDetection(maxPeaks=5,
orderBy='amplitude',
minPosition=0,
maxPosition=len(sumCorr)-1,
range=len(sumCorr)-1)
peaks = peakDetect(sumCorr)[0]
peaks = [round(x+1e-15) for x in peaks]
print 'Peaks:',peaks
pyplot.subplot(514)
maxAlpha = max(sumCorr)
for i,val in enumerate(sumCorr):
alpha = max(0,min(val/maxAlpha, 1))
pyplot.bar(i, val, barSize, align='edge',
bottom=0,alpha=alpha,
color='r', edgecolor='w', linewidth=.3)
# multiply both histogram and sum corr to have a weighted histogram:
wHist = essentia.array(hist)*sumCorr*acorr(loudness)
maxHist = argmax(wHist)
print 'max weighted histogram', maxHist
pyplot.subplot(515)
maxAlpha = max(wHist)
for i,val in enumerate(wHist):
alpha = max(0,min(val/maxAlpha, 1))
pyplot.bar(i, val, barSize, align='edge',
bottom=0,alpha=alpha,
color='r', edgecolor='w', linewidth=.3)
pyplot.savefig(outputfile, dpi=300)
#pyplot.show()
return
def ossplay(filename): # play audio thru oss
from wave import open as waveOpen
from ossaudiodev import open as ossOpen
s = waveOpen(filename,'rb')
(nc,sw,fr,nf,comptype, compname) = s.getparams( )
dsp = ossOpen('/dev/dsp','w')
try:
from ossaudiodev import AFMT_S16_NE
    except ImportError:
        import ossaudiodev
        if sys.byteorder == "little":
            AFMT_S16_NE = ossaudiodev.AFMT_S16_LE
        else:
            AFMT_S16_NE = ossaudiodev.AFMT_S16_BE
dsp.setparameters(AFMT_S16_NE, nc, fr)
data = s.readframes(nf)
s.close()
dsp.write(data)
dsp.close()
def getkey(audioFilename, device, f, card, lock):
c = None
b = True
while b:
#fd = sys.stdin.fileno()
#old = termios.tcgetattr(fd)
#new = termios.tcgetattr(fd)
#new[3] = new[3] & ~TERMIOS.ICANON & ~TERMIOS.ECHO
#new[6][TERMIOS.VMIN] = 1
#new[6][TERMIOS.VTIME] = 0
#termios.tcsetattr(fd, TERMIOS.TCSANOW, new)
#c = None
lock.acquire()
#try:
# c = os.read(fd, 1)
#finally:
# termios.tcsetattr(fd, TERMIOS.TCSAFLUSH, old)
#if c == '\n': ## break on a Return/Enter keypress
# b = False
# return
#if c==' ': playAudio(audioFilename)
#else: print 'got', c
#ossplay(audioFilename)
alsaplay(audioFilename, device, f, card)
lock.release()
time.sleep(0.1)
def alsaplay(filename, device, f, card):
device.setchannels(f.getnchannels())
device.setrate(f.getframerate())
# 8bit is unsigned in wav files
if f.getsampwidth() == 1:
device.setformat(alsaaudio.PCM_FORMAT_U8)
# Otherwise we assume signed data, little endian
elif f.getsampwidth() == 2:
device.setformat(alsaaudio.PCM_FORMAT_S16_LE)
elif f.getsampwidth() == 3:
device.setformat(alsaaudio.PCM_FORMAT_S24_LE)
elif f.getsampwidth() == 4:
device.setformat(alsaaudio.PCM_FORMAT_S32_LE)
else:
raise ValueError('Unsupported format')
device.setperiodsize(320)
data = f.readframes(320)
while data:
device.write(data)
data = f.readframes(320)
f.setpos(0)
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print 'usage: %s <audiofile or filelist.txt> [step]' % sys.argv[0]
        sys.exit(1)
step = 1
if len(sys.argv) > 2:
step = int(sys.argv[-1])
inputfilename = sys.argv[1]
ext = os.path.splitext(inputfilename)[1]
if ext == '.txt': # input file contains a list of audio files
files = open(inputfilename).read().split('\n')[:-1]
else: files = [inputfilename]
for audiofile in files:
print "*"*70
print "Processing ", audiofile
print "*"*70
try:
print 'realBpm', open(audiofile.replace('wav', 'bpm')).read()
except:
print 'realBpm not found'
pool = essentia.Pool()
pool.set('downmix', DOWNMIX)
pool.set('framesize', FRAMESIZE)
pool.set('hopsize', HOPSIZE)
pool.set('weight', WEIGHT)
pool.set('samplerate', SAMPLERATE)
pool.set('window', WINDOW)
pool.set('framerate', FRAMERATE)
pool.set('tempo_framesize', TEMPO_FRAMESIZE)
pool.set('tempo_overlap', TEMPO_OVERLAP)
pool.set('step', step)
#computeSegmentation(audiofile, pool)
#segments = pool['segments']
computeBeats(audiofile, pool)
beatFilename = writeBeatFile(audiofile, pool)
computeBeatsLoudness(audiofile, pool)
imgfilename = os.path.splitext(audiofile)[0]+'.png'
#imgfilename = imgfilename.split(os.sep)[-1]
#print 'plotting', imgfilename
if sys.platform == 'darwin' or sys.platform == 'win32':
plot(pool,'beats loudness ' + str(audiofile), imgfilename);
else:
card = 'default'
f = wave.open(beatFilename, 'rb')
# print '%d channels, sampling rate: %d \n' % (f.getnchannels(),
# f.getframerate())
device = alsaaudio.PCM(card=card)
lock = thread.allocate_lock()
thread.start_new_thread(getkey, (beatFilename, device, f, card, lock))
plot(pool,'beats loudness ' + audiofile, imgfilename);
f.close()
thread.exit()
#print 'deleting beatfile:', beatFilename
#subprocess.call(['rm', beatFilename])
| agpl-3.0 |
jmetzen/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
Tasignotas/topographica_mirror | topo/tkgui/topoconsole.py | 1 | 29464 | """
TopoConsole class file.
"""
# CB: does the status bar need to keep saying 'ok'? Sometimes
# positive feedback is useful, but 'ok' doesn't seem too helpful.
import os
import copy
import sys
import re
import webbrowser
import string
from Tkinter import Frame, Button, \
LEFT, YES, Label, DISABLED, \
NORMAL, DoubleVar
from tkFileDialog import asksaveasfilename,askopenfilename
import __main__
import param
from param import normalize_path,resolve_path
import paramtk as tk
from collections import OrderedDict
import topo
from topo.plotting.plotgroup import plotgroups, FeatureCurvePlotGroup
from topo.misc.commandline import sim_name_from_filename
import topo.misc.genexamples
import topo.command
import topo.tkgui
from templateplotgrouppanel import TemplatePlotGroupPanel
from featurecurvepanel import FeatureCurvePanel
from projectionpanel import SheetPanel,CFProjectionPanel,ProjectionActivityPanel,ConnectionFieldsPanel,RFProjectionPanel
from testpattern import TestPattern
from editor import ModelEditor
tk.AppWindow.window_icon_path = resolve_path('tkgui/icons/topo.xbm')
SCRIPT_FILETYPES = [('Topographica scripts','*.ty'),
('Python scripts','*.py'),
('All files','*')]
SAVED_FILE_EXTENSION = '.typ'
SAVED_FILETYPES = [('Topographica saved networks',
'*'+SAVED_FILE_EXTENSION),
('All files','*')]
turl = "http://topographica.org/"
userman = "User_Manual/index.html"
tuts = "Tutorials/index.html"
refman = "Reference_Manual/index.html"
plotman = "User_Manual/plotting.html"
# for deb on ubuntu; will need to check others
pkgdoc = "/usr/share/doc/topographica/doc/"
# Documentation locations: locally built and web urls.
user_manual_locations = ('doc/'+userman,
pkgdoc+userman,
turl+userman)
tutorials_locations = ('doc/'+tuts,
pkgdoc+tuts,
turl+tuts)
reference_manual_locations = ('doc/'+refman,
pkgdoc+refman,
turl+refman)
python_doc_locations = ('http://www.python.org/doc/',)
topo_www_locations = (turl,)
plotting_help_locations = ('doc/'+plotman,
pkgdoc+plotman,
turl+plotman)
# If a particular plotgroup_template needs (or works better with) a
# specific subclass of PlotPanel, the writer of the new subclass
# or the plotgroup_template can declare here that that template
# should use a specific PlotPanel subclass. For example:
# plotpanel_classes['Hue Pref Map'] = HuePreferencePanel
plotpanel_classes = {}
# CEBALERT: why are the other plotpanel_classes updates at the end of this file?
def open_plotgroup_panel(class_,plotgroup=None,**kw):
if class_.valid_context():
win = topo.guimain.some_area.new_window()
panel = class_(win,plotgroup=plotgroup,**kw)
if not panel.dock:
topo.guimain.some_area.eject(win)
else:
topo.guimain.some_area.consume(win)
panel.refresh_title()
panel.pack(expand='yes',fill='both')
win.sizeright()
#frame.sizeright()
#topo.guimain.messageBar.message('state', 'OK')
return panel
else:
topo.guimain.messageBar.response(
'No suitable objects in this simulation for this operation.')
class PlotsMenuEntry(param.Parameterized):
"""
Stores information about a Plots menu command
(including the command itself, and the plotgroup template).
"""
def __init__(self,plotgroup,class_=TemplatePlotGroupPanel,**params):
"""
Store the template, and set the class that will be created by this menu entry
If users want to extend the Plot Panel classes, then they
should add entries to the plotpanel_classes dictionary.
If no entry is defined there, then the default class is used.
The class_ is overridden for any special cases listed in this method.
"""
super(PlotsMenuEntry,self).__init__(**params)
self.plotgroup = plotgroup
# Special cases. These classes are specific to the topo/tkgui
# directory and therefore this link must be made within the tkgui
# files.
if isinstance(self.plotgroup,FeatureCurvePlotGroup):
class_ = plotpanel_classes.get(self.plotgroup.name,FeatureCurvePanel)
self.class_ = plotpanel_classes.get(self.plotgroup.name,class_)
def __call__(self,event=None,**kw):
"""
Instantiate the class_ (used as menu commands' 'command' attribute).
Keyword args are passed to the class_.
"""
new_plotgroup = copy.deepcopy(self.plotgroup)
# CB: hack to share plot_templates with the current
# plotgroup in plotgroups
new_plotgroup.plot_templates = topo.plotting.plotgroup.plotgroups[self.plotgroup.name].plot_templates
return open_plotgroup_panel(self.class_,new_plotgroup,**kw)
# Notebook only available for Tkinter>=8.5
try:
from paramtk.tilewrapper import Notebook
class DockManager(Notebook):
"""Manages windows that can be tabs in a notebook, or toplevels."""
def __init__(self, master=None, cnf={}, **kw):
Notebook.__init__(self, master, cnf=cnf, **kw)
self._tab_ids = {}
def _set_tab_title(self,win,title):
self.tab(self._tab_ids[win],text=title)
def _set_toplevel_title(self,win,title):
prefix = topo.sim.name+": "
if not title.startswith(prefix):
title=prefix+title
self.tk.call("wm","title",win._w,title)
def add(self, child, cnf={}, **kw):
self._tab_ids[child]=len(self.tabs())
Notebook.add(self,child,cnf=cnf,**kw)
## def unhide(self,win):
## if win in self._tab_ids:
## self.tab(self._tab_ids[win],state='normal')
def new_window(self):
win = tk.AppWindow(self,status=True)
#self.consume(win)
return win
def consume(self,win):
if win not in self._tab_ids:
self.tk.call('wm','forget',win._w)
win.title = lambda x: self._set_tab_title(win,x)
self.add(win)
def eject(self,win):
if win in self._tab_ids:
self.forget(self._tab_ids[win])
# manage my tab ids (HACK)
del self._tab_ids[win]
for w in self._tab_ids:
self._tab_ids[w]-=1
self._tab_ids[w]=max(self._tab_ids[w],0)
self.tk.call('wm','manage',win._w)
win.renew()
win.title = lambda x: self._set_toplevel_title(win,x)
return win
except ImportError:
class FakeDockManager(Frame):
def _set_tab_title(self,*args):
pass
def _set_toplevel_title(self,win,title):
prefix = topo.sim.name+": "
if not title.startswith(prefix):
title=prefix+title
self.tk.call("wm","title",win._w,title)
def add(self,*args):
pass
def new_window(self):
win = tk.AppWindow(self,status=True)
return win
def consume(self,win):
pass
def eject(self,win):
win.renew()
win.title = lambda x: self._set_toplevel_title(win,x)
return win
DockManager = FakeDockManager
# This is really a hack. There doesn't seem to be any easy way to tie
# an exception to the window from which it originated. (I couldn't
# find an example of tkinter software displaying a gui exception on
# the originating window.)
def _tkinter_report_exception(widget):
exc, val = sys.exc_type, sys.exc_value
msg = "(%s) %s"%(exc.__name__,val)
# If the supplied widget has no master, it's probably the Tk
# instance. In that case, resort to the 'last-one-set' hack (see
# CEBALERT "provide a way of allowing other gui components" in
# topo/param/tk.py).
if not widget.master:
widget = tk._last_one_set
stat = None
while (widget is not None and widget.master):
# CEBALERT: should rename all status bars to the same thing
# (status_bar)
if hasattr(widget,'status'):
stat = widget.status
break
elif hasattr(widget,'messageBar'):
stat = widget.messageBar
break
widget = widget.master
if stat is not None:
stat.error('%s'%msg)
else:
topo.guimain.messageBar.error('%s'%msg)
# BK-NOTE: Default is now to display full trace always. Any user
# errors should be caught as special exception cases
# BK-ALERT: Want to raise errors vs print, however this currently crashes ipython.
#raise
param.Parameterized().warning(msg)
import traceback
traceback.print_exc()
import Tkinter
class TopoConsole(tk.AppWindow,tk.TkParameterized):
"""
Main window for the Tk-based GUI.
"""
def _getmenubar(self):
return self.master.menubar
menubar = property(_getmenubar)
def __getitem__(self,menu_name):
"""Allow dictionary-style access to the menu bar."""
return self.menubar[menu_name]
def __init__(self, root,exit_on_quit=True, **params):
tk.AppWindow.__init__(self,root,status=True)
tk.TkParameterized.__init__(self,root,**params)
# Instead of displaying tracebacks on the commandline, try to display
# them on the originating window.
# CEBALERT: on destroy(), ought to revert this
Tkinter.Misc._report_exception=_tkinter_report_exception
self.exit_on_quit = exit_on_quit
self.auto_refresh_panels = []
self._init_widgets()
self.title(topo.sim.name) # If -g passed *before* scripts on commandline, this is useless.
# So topo.misc.commandline sets the title as its last action (if -g)
# catch click on the 'x': offers choice to quit or not
self.protocol("WM_DELETE_WINDOW",self.quit_topographica)
##########
### Make cascade menus open automatically on linux when the mouse
### is over the menu title.
### [Tkinter-discuss] Cascade menu issue
### http://mail.python.org/pipermail/tkinter-discuss/2006-August/000864.html
        if topo.tkgui.system_platform == 'linux':
activate_cascade = """\
if {[%W cget -type] != {menubar} && [%W type active] == {cascade}} {
%W postcascade active
}
"""
self.bind_class("Menu", "<<MenuSelect>>", activate_cascade)
##########
# Install warning and message handling
from param.parameterized import Parameterized
self.__orig_P_warning = Parameterized.warning
#self.__orig_P_message = Parameterized.message
type.__setattr__(Parameterized,'warning',self.gui_warning)
#type.__setattr__(Parameterized,'message',self.gui_message)
def gui_warning(self,*args):
stat = self.__get_status_bar()
s = string.join(args,' ')
stat.warn(s)
self.__orig_P_warning(self,*args)
def gui_message(self,*args):
stat = self.__get_status_bar()
s = string.join(args,' ')
stat.message(s)
self.__orig_P_message(self,*args)
def title(self,t=None):
newtitle = "Topographica"
if t: newtitle+=": %s" % t
tk.AppWindow.title(self,newtitle)
def _init_widgets(self):
## CEBALERT: now we can have multiple operations at the same time,
## status bar could be improved to show all tasks?
# CEBALERT
self.messageBar = self.status
self.some_area = DockManager(self)
self.some_area.pack(fill="both", expand=1)
### Balloon, for pop-up help
self.balloon = tk.Balloon(self.content)
### Top-level (native) menu bar
#self.menubar = tk.ControllableMenu(self.content)
self.configure(menu=self.menubar)
#self.menu_balloon = Balloon(topo.tkgui.root)
# no menubar in tile yet
# http://news.hping.org/comp.lang.tcl.archive/4679.html
self.__simulation_menu()
self.__create_plots_menu()
self.refresh_plots_menu()
self.__help_menu()
### Running the simulation
run_frame = Frame(self.content)
run_frame.pack(side='top',fill='x',padx=4,pady=8)
self.run_frame = run_frame
Label(run_frame,text='Run for: ').pack(side=LEFT)
self.run_for_var=DoubleVar()
self.run_for_var.set(1.0)
run_for = tk.TaggedSlider(run_frame,
variable=self.run_for_var,
tag_width=11,
slider_length=150,
bounds=(0,20000))
self.balloon.bind(run_for,"Duration to run the simulation, e.g. 0.0500, 1.0, or 20000.")
run_for.pack(side=LEFT,fill='x',expand=YES)
run_for.tag.bind("<Return>",self.run_simulation)
# When return is pressed, the TaggedSlider updates itself...but we also want to run
# the simulation in this case.
run_frame.optional_action=self.run_simulation
go_button = Button(run_frame,text="Go",
command=self.run_simulation)
go_button.pack(side=LEFT)
self.balloon.bind(go_button,"Run the simulation for the specified duration.")
self.step_button = Button(run_frame,text="Step",command=self.run_step)
self.balloon.bind(self.step_button,"Run the simulation through the time at which the next events are processed.")
self.step_button.pack(side=LEFT)
self.sizeright()
def __simulation_menu(self):
"""Add the simulation menu options to the menubar."""
simulation_menu = ControllableMenu(self.menubar,tearoff=0)
self.menubar.add_cascade(label='Simulation',menu=simulation_menu)
simulation_menu.add_command(label='Run script',command=self.run_script)
simulation_menu.add_command(label='Save script',command=self.save_script_repr)
simulation_menu.add_command(label='Load snapshot',command=self.load_snapshot)
simulation_menu.add_command(label='Save snapshot',command=self.save_snapshot)
#simulation_menu.add_command(label='Reset',command=self.reset_network)
simulation_menu.add_command(label='Test Pattern',command=self.open_test_pattern)
simulation_menu.add_command(label='Model Editor',command=self.open_model_editor)
simulation_menu.add_command(label='Quit',command=self.quit_topographica)
def open_test_pattern(self):
return open_plotgroup_panel(TestPattern)
def __create_plots_menu(self):
"""
Add the plot menu to the menubar, with Basic plots on the menu itself and
others in cascades by category (the plots come from plotgroup_templates).
"""
plots_menu = ControllableMenu(self.menubar,tearoff=0)
self.menubar.add_cascade(label='Plots',menu=plots_menu)
# CEBALERT: should split other menus in same way as plots (create/refresh)
def refresh_plots_menu(self):
plots_menu = self['Plots']
plots_menu.delete(0,'end')
# create menu entries, and get list of categories
entries=OrderedDict() # keep the order of plotgroup_templates (which is also KL)
categories = []
for label,plotgroup in plotgroups.items():
entries[label] = PlotsMenuEntry(plotgroup)
categories.append(plotgroup.category)
categories = sorted(set(categories))
# The Basic category items appear on the menu itself.
assert 'Basic' in categories, "'Basic' is the category for the standard Plots menu entries."
for label,entry in entries.items():
if entry.plotgroup.category=='Basic':
plots_menu.add_command(label=label,command=entry.__call__)
categories.remove('Basic')
plots_menu.add_separator()
# Add the other categories to the menu as cascades, and the plots of each category to
# their cascades.
for category in categories:
category_menu = ControllableMenu(plots_menu,tearoff=0)
plots_menu.add_cascade(label=category,menu=category_menu)
# could probably search more efficiently than this
for label,entry in entries.items():
if entry.plotgroup.category==category:
category_menu.add_command(label=label,command=entry.__call__)
plots_menu.add_separator()
plots_menu.add_command(label="Help",command=(lambda x=plotting_help_locations: self.open_location(x)))
def __help_menu(self):
"""Add the help menu options."""
help_menu = ControllableMenu(self.menubar,tearoff=0,name='help')
self.menubar.add_cascade(label='Help',menu=help_menu)
help_menu.add_command(label='About',command=self.new_about_window)
help_menu.add_command(label="User Manual",
command=(lambda x=user_manual_locations: self.open_location(x)))
help_menu.add_command(label="Tutorials",
command=(lambda x=tutorials_locations: self.open_location(x)))
help_menu.add_command(label="Examples",
command=self.run_example_script)
help_menu.add_command(label="Reference Manual",
command=(lambda x=reference_manual_locations: self.open_location(x)))
help_menu.add_command(label="Topographica.org",
command=(lambda x=topo_www_locations: self.open_location(x)))
help_menu.add_command(label="Python documentation",
command=(lambda x=python_doc_locations: self.open_location(x)))
def quit_topographica(self,check=True,exit_status=0):
"""Quit topographica."""
if not check or (check and tk.askyesno("Quit Topographica","Really quit?")):
self.destroy()
# matplotlib's tk backend starts its own Tk instances; we
# need to close these ourselves (at least to avoid error
# message about 'unusual termination' in Windows).
try: # not that there should be an error, but just in case...
import matplotlib._pylab_helpers
for figman in matplotlib._pylab_helpers.Gcf.get_all_fig_managers():
figman.destroy()
except:
pass
self.message("Quit selected%s" % ("; exiting" if self.exit_on_quit else ""))
# Workaround for obscure problem on some UNIX systems
# as of 4/2007, probably including Fedora Core 5.
# On these systems, if Topographica is started from a
# bash prompt and then quit from the Tkinter GUI (as
# opposed to using Ctrl-D in the terminal), the
# terminal would suppress echoing of all future user
# input. stty sane restores the terminal to sanity,
# but it is not clear why this is necessary.
# For more info:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/68d0f33c8eb2e02d
if topo.tkgui.system_platform=="linux" and os.getenv('EMACS')!='t':
try: os.system("stty sane")
except: pass
# CEBALERT: re. above. Shouldn't we be able to store the
# output of "stty --save" before starting the gui, then
# ensure that when the gui exits (however badly it
# happens) run "stty saved_settings"?
# CEBALERT: there was no call to self.master.destroy()
if self.exit_on_quit:
sys.exit(exit_status)
def run_script(self):
"""
Dialog to run a user-selected script
The script is exec'd in __main__.__dict__ (i.e. as if it were specified on the commandline.)
"""
script = askopenfilename(initialdir=normalize_path(),filetypes=SCRIPT_FILETYPES)
if script in ('',(),None): # (representing the various ways no script was selected in the dialog)
self.messageBar.response('Run canceled')
else:
execfile(script,__main__.__dict__)
self.messageBar.response('Ran ' + script)
sim_name_from_filename(script)
self.title(topo.sim.name)
# CEBALERT: duplicates most of run_script()
def run_example_script(self):
script = askopenfilename(initialdir=topo.misc.genexamples.find_examples(),
filetypes=SCRIPT_FILETYPES)
if script in ('',(),None): # (representing the various ways no script was selected in the dialog)
self.messageBar.response('No example opened')
else:
execfile(script,__main__.__dict__)
self.messageBar.response('Ran ' + script)
sim_name_from_filename(script)
self.title(topo.sim.name)
def save_script_repr(self):
script_name = asksaveasfilename(filetypes=SCRIPT_FILETYPES,
initialdir=normalize_path(),
initialfile=topo.sim.basename()+"_script_repr.ty")
if script_name:
topo.command.save_script_repr(script_name)
self.messageBar.response('Script saved to ' + script_name)
def load_snapshot(self):
"""
Dialog to load a user-selected snapshot (see topo.command.load_snapshot() ).
"""
snapshot_name = askopenfilename(initialdir=normalize_path(),filetypes=SAVED_FILETYPES)
if snapshot_name in ('',(),None):
self.messageBar.response('No snapshot loaded.')
else:
self.messageBar.dynamicinfo('Loading snapshot (may take some time)...')
self.update_idletasks()
topo.command.load_snapshot(snapshot_name)
self.messageBar.response('Loaded snapshot ' + snapshot_name)
self.title(topo.sim.name)
self.auto_refresh()
def save_snapshot(self):
"""
Dialog to save a snapshot (see topo.command.save_snapshot() ).
Adds the file extension .typ if not already present.
"""
snapshot_name = asksaveasfilename(filetypes=SAVED_FILETYPES,
initialdir=normalize_path(),
initialfile=topo.sim.basename()+".typ")
if snapshot_name in ('',(),None):
self.messageBar.response('No snapshot saved.')
else:
            if not snapshot_name.endswith(SAVED_FILE_EXTENSION):
                snapshot_name = snapshot_name + SAVED_FILE_EXTENSION
self.messageBar.dynamicinfo('Saving snapshot (may take some time)...')
self.update_idletasks()
topo.command.save_snapshot(snapshot_name)
self.messageBar.response('Snapshot saved to ' + snapshot_name)
def auto_refresh(self, update=True):
"""
Refresh all windows in auto_refresh_panels.
        Panels can add themselves to and remove themselves from the list; those
        in the list will have their refresh() method called whenever this
        console's auto_refresh() is called.
"""
for win in self.auto_refresh_panels:
win.refresh(update)
self.set_step_button_state()
self.update_idletasks()
### CEBERRORALERT: why doesn't updatecommand("display=True") for an
### orientation preference map measurement work with the
### hierarchical example? I guess this is the reason I thought the
### updating never worked properly (or I really did break it
### recently - or I'm confused)...
def refresh_activity_windows(self):
"""
Update any windows with a plotgroup_key ending in 'Activity'.
Used primarily for debugging long scripts that present a lot of activity patterns.
"""
for win in self.auto_refresh_panels:
if re.match('.*Activity$',win.plotgroup.name):
win.refresh()
self.update_idletasks()
def open_model_editor(self):
"""Start the Model editor."""
return ModelEditor(self)
def new_about_window(self):
win = tk.AppWindow(self)
win.withdraw()
win.title("About Topographica")
text = Label(win,text=topo.about(display=False),justify=LEFT)
text.pack(side=LEFT)
win.deiconify()
#self.messageBar.message('state', 'OK')
def open_location(self, locations):
"""
Try to open one of the specified locations in a new window of the default
browser. See webbrowser module for more information.
locations should be a tuple.
"""
# CB: could have been a list. This is only here because if locations is set
# to a string, it will loop over the characters of the string.
assert isinstance(locations,tuple),"locations must be a tuple."
for location in locations:
try:
existing_location = resolve_path(location)
webbrowser.open(existing_location,new=2,autoraise=True)
self.messageBar.response('Opened local file '+existing_location+' in browser.')
return ###
except:
pass
for location in locations:
if location.startswith('http'):
try:
webbrowser.open(location,new=2,autoraise=True)
self.messageBar.response('Opened remote location '+location+' in browser.')
return ###
except:
pass
self.messageBar.response("Could not open any of %s in a browser."%locations)
# CEBALERT: need to take care of removing old messages automatically?
# (Otherwise callers might always have to pass 'ok'.)
def status_message(self,m):
self.messageBar.response(m)
def run_simulation(self,event=None): # event=None allows use as callback
"""
Run the simulation for the duration specified in the
'run for' taggedslider.
"""
fduration = self.run_for_var.get()
self.open_progress_window(timer=topo.sim.timer)
topo.sim.run_and_time(fduration)
self.auto_refresh()
# CEBERRORALERT: Step button does strange things at time==0.
# E.g. for lissom_oo_or, nothing appears to happen. For
# hierarchical, runs to time==10.
def run_step(self):
if not topo.sim.events:
# JP: step button should be disabled if there are no events,
# but just in case...
return
# JPALERT: This should really use .run_and_time() but it doesn't support
# run(until=...)
topo.sim.run(until=topo.sim.events[0].time)
self.auto_refresh()
def set_step_button_state(self):
if topo.sim.events:
self.step_button.config(state=NORMAL)
else:
self.step_button.config(state=DISABLED)
def __get_status_bar(self,i=2):
# Hack to find appropriate status bar: Go back through frames
# until a widget with a status bar is found, and return it.
try:
while True:
f = sys._getframe(i)
if hasattr(f,'f_locals'):
if 'self' in f.f_locals:
o = f.f_locals['self']
# (temporary hack til ScrolledFrame cleaned up)
if o.__class__.__name__!='ScrolledFrame':
if hasattr(o,'messageBar'):
return o.messageBar
elif hasattr(o,'status'):
return o.status
i+=1
except:
pass
#print "GUI INTERNAL WARNING: failed to determine window on which to display message."
return self.messageBar
def open_progress_window(self,timer,title=None):
"""
Provide a convenient link to progress bars.
"""
stat = self.__get_status_bar()
return stat.open_progress_window(timer=timer,sim=topo.sim)
# CEBALERT: of course dictionary access is used as an alternative to
# the config method or whatever it's called! So this could cause
# serious confusion to someone trying to set config options using the
# dictionary style access rather than .config()! Either document
# clearly or abandon, and get() and set() to access menu entries by
# name.
class ControllableMenu(tk.Menu):
"""
A Menu, but where entries are accessible by name (using
dictionary-style access).
** Not truly compatible with Tkinter; work in progress **
"""
def __getitem__(self,name):
return self.named_commands[name]
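# Usage sketch (illustrative commentary only): once the menus are built, entries
# can be reached by label instead of by index, e.g.
#
#     plots_menu = topo.guimain['Plots']   # TopoConsole.__getitem__
#     help_entry = plots_menu['Help']      # ControllableMenu.__getitem__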
if __name__ != '__main__':
plotpanel_classes['Activity'] = SheetPanel
plotpanel_classes['Connection Fields'] = ConnectionFieldsPanel
plotpanel_classes['RF Projection'] = RFProjectionPanel
plotpanel_classes['Projection'] = CFProjectionPanel
plotpanel_classes['Projection Activity'] = ProjectionActivityPanel
| bsd-3-clause |
roxyboy/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
tosolveit/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 230 | 2823 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
| bsd-3-clause |
beepee14/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
FluVigilanciaBR/seasonality | methods/misc/split_email_data.py | 1 | 6025 | # coding:utf8
__author__ = 'Marcelo Ferreira da Costa Gomes'
import glob
import os
import argparse
import logging
import re
import zipfile
import pandas as pd
from argparse import RawDescriptionHelpFormatter
from datetime import date
from dbfread import DBF
from fnmatch import fnmatch
from patoolib import extract_archive
from subprocess import run
module_logger = logging.getLogger('update_system.email_extract')
def remove_whitespace(x):
if isinstance(x, object):
x = x.str.lstrip().str.rstrip()
x = x.where(lambda v: v != '', None)
return x
else:
return x
def date_cleanup(df, dt_cols):
'''
Standardize column data and discard those without notification and/or first symptoms date.
:param df: notifications data frame
:param dt_cols: list of data columns
:return: df: standardized data frame
'''
module_logger.info('Date columns cleanup: %s' % dt_cols)
# Filter by notification date
df = df.where(df != -1, None)
df[dt_cols] = df[dt_cols].where(df[dt_cols] != 10101, None)
# Convert all date related columns to datetime format
for col in dt_cols:
# Convert all date columns to datetime format. Output will have the format YYYY-MM-DD
dtformat = '%Y%m%d'
sample = df.loc[~pd.isnull(df[col]), col].values[0]
if isinstance(sample, str):
df[col] = remove_whitespace(df[col])
if sum(~pd.isnull(df[col])) > 0:
sample = df.loc[~pd.isnull(df[col]), col].values[0]
if isinstance(sample, str):
if 'T' in sample:
df[col] = pd.to_datetime(df[col].str[:10], errors='coerce', format='%Y-%m-%d')
else:
dtsep = '-'
if '/' in sample:
dtsep = '/'
dttest = pd.DataFrame(list(
df.loc[~pd.isnull(df[col]), col].str.split(dtsep)
))
maxvals = [int(dttest[i].max()) for i in range(3)]
del dttest
yearpos = maxvals.index(max(maxvals))
if yearpos == 2:
if maxvals[1] > 12:
dtformat = '%m' + dtsep + '%d' + dtsep + '%Y'
else:
dtformat = '%d' + dtsep + '%m' + dtsep + '%Y'
else:
dtformat = '%Y' + dtsep + '%m' + dtsep + '%d'
df[col] = pd.to_datetime(df[col], errors='coerce', format=dtformat)
else:
df[col] = pd.to_datetime(df[col], errors='coerce', format=dtformat)
else:
df[col] = pd.to_datetime(df[col])
return df
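# Illustrative sketch (not part of the original script; the column name and the
# sample values below are assumptions): date_cleanup takes the notification
# dataframe plus the list of DT_* columns and returns them as datetime columns.
def _example_date_cleanup():
    df = pd.DataFrame({'DT_NOTIFIC': ['03/01/2020', '10/01/2020', None]})
    return date_cleanup(df, ['DT_NOTIFIC'])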
def write_to_folder(df, year, dir=None, append=False):
if not dir:
dir = os.getcwd()
output = os.path.join(dir, '..', 'data', 'INFLUD%s.csv' % year)
if append:
df = df.append(pd.read_csv(output, dtype=df.dtypes.to_dict()), ignore_index=True, sort=False)
df.columns = df.columns.str.upper()
df.to_csv(output, index=False, encoding='utf-8', date_format='%Y-%m-%d')
return
def extract_csv(dir, sep=',', year=None):
cwd = os.getcwd()
os.chdir(dir)
today = date.today().strftime('%Y-%m-%d')
for f in glob.glob('*.zip'):
run(["rename", 's/ //', f], check=True)
try:
fz = sorted(glob.glob('*.zip'), reverse=True)
fz.extend(glob.glob('*.rar'))
for f in fz:
module_logger.info('Extracting files from: %s' % f)
extract_archive(f, outdir='./')
run(['mv', '-f', f, './processed/%s_%s' % (today, f)], check=True)
except ValueError as error:
module_logger.error(error)
raise ValueError('No zip or rar file on %s' % dir)
if glob.glob('*.csv'):
flist = glob.glob('*.csv')
suff = '.csv'
elif glob.glob('*.CSV'):
flist = glob.glob('*.CSV')
suff = '.CSV'
elif glob.glob('*.dbf'):
flist = glob.glob('*.dbf')
suff = '.dbf'
elif glob.glob('*.DBF'):
flist = glob.glob('*.DBF')
suff = '.DBF'
elif glob.glob('SRAGHOSP*'):
flist = [f for f in glob.glob("SRAGHOSP*") if not fnmatch(f, "*.zip")]
yold = -1
for file in flist:
module_logger.info('Processing file: %s' % file)
if 'csv' in file.lower():
for enc in ['utf-8', 'utf-16', 'latin-1']:
try:
df = pd.read_csv(file, sep=sep, header=0, encoding=enc, low_memory=False)
break
except UnicodeDecodeError:
pass
else:
for enc in ['utf-8', 'utf-16', 'latin-1']:
try:
table = DBF(file, encoding=enc)
df = pd.DataFrame(iter(table))
break
except UnicodeDecodeError:
pass
df.columns = df.columns.str.upper()
tgtcols = list(df.columns)
regexp = re.compile('^DT')
dt_cols = list(filter(regexp.match, tgtcols))
df = date_cleanup(df, dt_cols)
write_to_folder(df[df.DT_SIN_PRI < '2021-01-03'], 2020, cwd)
write_to_folder(df[df.DT_SIN_PRI >= '2021-01-03'], 2021, cwd)
run(['rm', '-f', dir+file], check=True)
os.chdir(cwd)
return df
def main(dir, sep=',', year=None):
df = extract_csv(dir, sep=sep, year=year)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Extract csv from zipped file sent by email.\n" +
"python3 email_extract.py --dir [folder] --year [YYYY]\n",
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--dir', help='Base folder')
parser.add_argument('--year', help='Base year')
parser.add_argument('--sep', help='Column delimiter')
args = parser.parse_args()
print(args)
main(args.dir, args.sep, args.year)
| gpl-3.0 |
raghakot/keras-vis | vis/utils/utils.py | 1 | 11157 | from __future__ import absolute_import
from __future__ import division
import os
import tempfile
import math
import json
import six
import numpy as np
import matplotlib.font_manager as fontman
from skimage import io, transform
from keras import backend as K
from keras.models import load_model
import logging
logger = logging.getLogger(__name__)
try:
import PIL as pil
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
except ImportError:
pil = None
# Globals
_CLASS_INDEX = None
def _check_pil():
if not pil:
raise ImportError('Failed to import PIL. You must install Pillow')
def _find_font_file(query):
"""Utility to find font file.
"""
return list(filter(lambda path: query.lower() in os.path.basename(path).lower(), fontman.findSystemFonts()))
def reverse_enumerate(iterable):
"""Enumerate over an iterable in reverse order while retaining proper indexes, without creating any copies.
"""
return zip(reversed(range(len(iterable))), reversed(iterable))
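# Illustrative sketch (added; never called by this module):
def _example_reverse_enumerate():
    # Produces [(2, 'c'), (1, 'b'), (0, 'a')] without copying the input list.
    return list(reverse_enumerate(['a', 'b', 'c']))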
def listify(value):
"""Ensures that the value is a list. If it is not a list, it creates a new list with `value` as an item.
"""
if not isinstance(value, list):
value = [value]
return value
def add_defaults_to_kwargs(defaults, **kwargs):
"""Updates `kwargs` with dict of `defaults`
Args:
defaults: A dictionary of keys and values
**kwargs: The kwargs to update.
Returns:
The updated kwargs.
"""
defaults = dict(defaults)
defaults.update(kwargs)
return defaults
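# Illustrative sketch (added; never called by this module):
def _example_add_defaults_to_kwargs():
    # Caller-supplied kwargs win over defaults: returns {'alpha': 0.5, 'beta': 2}.
    return add_defaults_to_kwargs({'alpha': 0.5, 'beta': 1}, beta=2)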
def get_identifier(identifier, module_globals, module_name):
"""Helper utility to retrieve the callable function associated with a string identifier.
Args:
identifier: The identifier. Could be a string or function.
module_globals: The global objects of the module.
module_name: The module name
Returns:
The callable associated with the identifier.
"""
if isinstance(identifier, six.string_types):
fn = module_globals.get(identifier)
if fn is None:
raise ValueError('Unknown {}: {}'.format(module_name, identifier))
return fn
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret identifier')
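# Illustrative sketch (added; never called by this module): resolve a callable
# from a string identifier using this module's own globals.
def _example_get_identifier():
    # Returns the `listify` function defined above.
    return get_identifier('listify', globals(), 'utils')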
def apply_modifications(model, custom_objects=None):
"""Applies modifications to the model layers to create a new Graph. For example, simply changing
`model.layers[idx].activation = new activation` does not change the graph. The entire graph needs to be updated
with modified inbound and outbound tensors because of change in layer building function.
Args:
model: The `keras.models.Model` instance.
Returns:
The modified model with changes applied. Does not mutate the original `model`.
"""
# The strategy is to save the modified model and load it back. This is done because setting the activation
    # in a Keras layer doesn't actually change the graph. We have to iterate the entire graph and change the
# layer inbound and outbound nodes with modified tensors. This is doubly complicated in Keras 2.x since
# multiple inbound and outbound nodes are allowed with the Graph API.
model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')
try:
model.save(model_path)
return load_model(model_path, custom_objects=custom_objects)
finally:
os.remove(model_path)
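# Usage sketch reflecting the docstring above (the model and its final layer are
# assumptions supplied by the caller; this helper is illustrative and never
# called by the module itself):
def _example_apply_modifications(model):
    from keras import activations
    # Swap the last layer's activation (e.g. softmax -> linear), then rebuild
    # the graph so the change actually takes effect.
    model.layers[-1].activation = activations.linear
    return apply_modifications(model)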
def random_array(shape, mean=128., std=20.):
"""Creates a uniformly distributed random array with the given `mean` and `std`.
Args:
shape: The desired shape
mean: The desired mean (Default value = 128)
std: The desired std (Default value = 20)
Returns: Random numpy array of given `shape` uniformly distributed with desired `mean` and `std`.
"""
x = np.random.random(shape)
# normalize around mean=0, std=1
x = (x - np.mean(x)) / (np.std(x) + K.epsilon())
# and then around the desired mean/std
x = (x * std) + mean
return x
def find_layer_idx(model, layer_name):
"""Looks up the layer index corresponding to `layer_name` from `model`.
Args:
model: The `keras.models.Model` instance.
layer_name: The name of the layer to lookup.
Returns:
The layer index if found. Raises an exception otherwise.
"""
layer_idx = None
for idx, layer in enumerate(model.layers):
if layer.name == layer_name:
layer_idx = idx
break
if layer_idx is None:
raise ValueError("No layer with name '{}' within the model".format(layer_name))
return layer_idx
def deprocess_input(input_array, input_range=(0, 255)):
"""Utility function to scale the `input_array` to `input_range` throwing away high frequency artifacts.
Args:
input_array: An N-dim numpy array.
input_range: Specifies the input range as a `(min, max)` tuple to rescale the `input_array`.
Returns:
The rescaled `input_array`.
"""
# normalize tensor: center on 0., ensure std is 0.1
input_array = input_array.copy()
input_array -= input_array.mean()
input_array /= (input_array.std() + K.epsilon())
input_array *= 0.1
# clip to [0, 1]
input_array += 0.5
input_array = np.clip(input_array, 0, 1)
# Convert to `input_range`
return (input_range[1] - input_range[0]) * input_array + input_range[0]
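# For example, with the default `input_range` any float array is mapped into [0, 255]; values
# more than ~5 standard deviations from the mean are clipped away and the bulk lands near 127.5.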
def stitch_images(images, margin=5, cols=5):
"""Utility function to stitch images together with a `margin`.
Args:
images: The array of 2D images to stitch.
margin: The black border margin size between images (Default value = 5)
cols: Max number of image cols. New row is created when number of images exceed the column size.
(Default value = 5)
Returns:
A single numpy image array comprising of input images.
"""
if len(images) == 0:
return None
h, w, c = images[0].shape
n_rows = int(math.ceil(len(images) / cols))
n_cols = min(len(images), cols)
out_w = n_cols * w + (n_cols - 1) * margin
out_h = n_rows * h + (n_rows - 1) * margin
stitched_images = np.zeros((out_h, out_w, c), dtype=images[0].dtype)
for row in range(n_rows):
for col in range(n_cols):
img_idx = row * cols + col
if img_idx >= len(images):
break
stitched_images[(h + margin) * row: (h + margin) * row + h,
(w + margin) * col: (w + margin) * col + w, :] = images[img_idx]
return stitched_images
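# For example (illustrative sizes): five 32x32x3 images with the default margin=5 and cols=5
# are laid out in a single row, giving an output array of shape (32, 180, 3).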
def get_img_shape(img):
"""Returns image shape in a backend agnostic manner.
Args:
img: An image tensor of shape: `(channels, image_dims...)` if data_format='channels_first' or
`(image_dims..., channels)` if data_format='channels_last'.
Returns:
Tuple containing image shape information in `(samples, channels, image_dims...)` order.
"""
if isinstance(img, np.ndarray):
shape = img.shape
else:
shape = K.int_shape(img)
if K.image_data_format() == 'channels_last':
shape = list(shape)
shape.insert(1, shape[-1])
shape = tuple(shape[:-1])
return shape
def load_img(path, grayscale=False, target_size=None):
"""Utility function to load an image from disk.
Args:
path: The image file path.
grayscale: True to convert to grayscale image (Default value = False)
target_size: (w, h) to resize. (Default value = None)
Returns:
The loaded numpy image.
"""
img = io.imread(path, grayscale)
if target_size:
img = transform.resize(img, target_size, preserve_range=True).astype('uint8')
return img
def lookup_imagenet_labels(indices):
"""Utility function to return the image net label for the final `dense` layer output index.
Args:
indices: Could be a single value or an array of indices whose labels should be looked up.
Returns:
Image net label corresponding to the image category.
"""
global _CLASS_INDEX
if _CLASS_INDEX is None:
with open(os.path.join(os.path.dirname(__file__), '../../resources/imagenet_class_index.json')) as f:
_CLASS_INDEX = json.load(f)
indices = listify(indices)
return [_CLASS_INDEX[str(idx)][1] for idx in indices]
def draw_text(img, text, position=(10, 10), font='FreeSans.ttf', font_size=14, color=(0, 0, 0)):
"""Draws text over the image. Requires PIL.
Args:
img: The image to use.
text: The text string to overlay.
position: The text (x, y) position. (Default value = (10, 10))
font: The ttf or open type font to use. (Default value = 'FreeSans.ttf')
        font_size: The text font size. (Default value = 14)
color: The (r, g, b) values for text color. (Default value = (0, 0, 0))
Returns: Image overlayed with text.
"""
_check_pil()
font_files = _find_font_file(font)
if len(font_files) == 0:
        logger.warning("Failed to lookup font '{}', falling back to default".format(font))
font = ImageFont.load_default()
else:
font = ImageFont.truetype(font_files[0], font_size)
# Don't mutate original image
img = Image.fromarray(img)
draw = ImageDraw.Draw(img)
draw.text(position, text, fill=color, font=font)
return np.asarray(img)
def bgr2rgb(img):
"""Converts an RGB image to BGR and vice versa
Args:
img: Numpy array in RGB or BGR format
Returns: The converted image format
"""
return img[..., ::-1]
def normalize(array, min_value=0., max_value=1.):
"""Normalizes the numpy array to (min_value, max_value)
Args:
array: The numpy array
min_value: The min value in normalized array (Default value = 0)
max_value: The max value in normalized array (Default value = 1)
Returns:
The array normalized to range between (min_value, max_value)
"""
arr_min = np.min(array)
arr_max = np.max(array)
normalized = (array - arr_min) / (arr_max - arr_min + K.epsilon())
return (max_value - min_value) * normalized + min_value
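# For example: normalize(np.array([2., 4., 6.])) returns approximately [0., 0.5, 1.]
# (exact up to the K.epsilon() term added to avoid division by zero).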
class _BackendAgnosticImageSlice(object):
"""Utility class to make image slicing uniform across various `image_data_format`.
"""
def __getitem__(self, item_slice):
"""Assuming a slice for shape `(samples, channels, image_dims...)`
"""
if K.image_data_format() == 'channels_first':
return item_slice
else:
# Move channel index to last position.
item_slice = list(item_slice)
item_slice.append(item_slice.pop(1))
return tuple(item_slice)
"""Slice utility to make image slicing uniform across various `image_data_format`.
Example:
    conv_layer[utils.slicer[:, filter_idx, :, :]] will work for both `channels_first` and `channels_last` image
    data formats even though, with tensorflow's default `channels_last` format, the underlying slice that gets
    applied is conv_layer[:, :, :, filter_idx]
"""
slicer = _BackendAgnosticImageSlice()
| mit |
ScoffM/ITESO-Word2Vec | Word2Vec.py | 1 | 9935 | import pandas as pd
# Read data from files
train = pd.read_csv("labeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
test = pd.read_csv("testData.tsv", header=0, delimiter="\t", quoting=3)
unlabeled_train = pd.read_csv("unlabeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
# Verify the number of reviews that were read (100,000 in total)
print "Read %d labeled train reviews, %d labeled test reviews, " "and %d unlabeled reviews\n" % (train["review"].size,
test["review"].size, unlabeled_train["review"].size )
# Import various modules for string cleaning
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
# Function to process reviews, remove stop words optional
def review_to_wordlist( review, remove_stopwords=False ):
# 1. Remove HTML
review_text = BeautifulSoup(review,"html.parser").get_text()
# 2. Remove non-letters
review_text = re.sub("[^a-zA-Z]"," ", review_text)
# 3. Convert words to lower case and split them
words = review_text.lower().split()
# 4. Optionally remove stop words (false by default)
if remove_stopwords:
stops = set(stopwords.words("english"))
words = [w for w in words if not w in stops]
# 5. Return a list of words
return(words)
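# Quick illustrative check of the cleaner on a made-up snippet (not part of the dataset):
# HTML tags and punctuation are stripped and everything is lower-cased.
assert review_to_wordlist("<b>A GREAT movie!</b>") == ["a", "great", "movie"]
assert review_to_wordlist("This is a GREAT movie", remove_stopwords=True) == ["great", "movie"]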
import nltk.data
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
toks = nltk.data.load('tokenizers/punkt/english.pickle')
# Define a function to split a review into parsed sentences
def review_to_sentences(review, tokenizer, remove_stopwords=False):
# 1. Use the NLTK tokenizer to split the paragraph into sentences
raw_sentences = tokenizer.tokenize(review.strip())
# 2. Loop over each sentence
sentences = []
for raw_sentence in raw_sentences:
if len(raw_sentence) > 0:
sentences.append(review_to_wordlist(raw_sentence, remove_stopwords))
#Returns a list of sentences, where each sentence is a list of words
return sentences
sentences = [] # Initialize an empty list of sentences
print "Parsing sentences from training set"
for review in train["review"]:
sentences += review_to_sentences(review.decode("utf8"), tokenizer)
print "Parsing sentences from unlabeled set"
for review in unlabeled_train["review"]:
sentences += review_to_sentences(review.decode("utf8"), tokenizer)
# Should probably make script to remove URLs from reviews.
# Import the built-in logging module and configure it so that Word2Vec
# creates nice output messages
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',\
level=logging.INFO)
# Set values for various parameters
num_features = 300 # Word vector dimensionality
min_word_count = 40 # Minimum word count
num_workers = 4 # Number of threads to run in parallel
context = 10 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
# Initialize and train the model (this will take some time)
from gensim.models import word2vec
print "Training model..."
model = word2vec.Word2Vec(sentences, workers=num_workers,
size=num_features, min_count = min_word_count,
window = context, sample = downsampling)
# If you don't plan to train the model any further, calling
# init_sims will make the model much more memory-efficient.
model.init_sims(replace=True)
# It can be helpful to create a meaningful model name and
# save the model for later use. You can load it later using Word2Vec.load()
model_name = "300features_40minwords_10context"
model.save(model_name)
import numpy as np # Make sure that numpy is imported
def makeFeatureVec(words, model, num_features):
# Function to average all of the word vectors in a given
# paragraph
#
# Pre-initialize an empty numpy array (for speed)
featureVec = np.zeros((num_features,),dtype="float32")
#
nwords = 0.
#
# Index2word is a list that contains the names of the words in
# the model's vocabulary. Convert it to a set, for speed
index2word_set = set(model.index2word)
#
# Loop over each word in the review and, if it is in the model's
    # vocabulary, add its feature vector to the total
for word in words:
if word in index2word_set:
nwords = nwords + 1.
featureVec = np.add(featureVec,model[word])
#
# Divide the result by the number of words to get the average
featureVec = np.divide(featureVec,nwords)
return featureVec
def getAvgFeatureVecs(reviews, model, num_features):
# Given a set of reviews (each one a list of words), calculate
# the average feature vector for each one and return a 2D numpy array
#
# Initialize a counter
counter = 0.
#
# Preallocate a 2D numpy array, for speed
reviewFeatureVecs = np.zeros((len(reviews),num_features),dtype="float32")
#
# Loop through the reviews
for review in reviews:
# Print a status message every 1000th review
if counter%1000. == 0.:
print "Review %d of %d" % (counter, len(reviews))
#
# Call the function (defined above) that makes average feature vectors
        # Cast the float counter to int: numpy requires integer indices here
        reviewFeatureVecs[int(counter)] = makeFeatureVec(review, model, num_features)
#
# Increment the counter
counter = counter + 1.
return reviewFeatureVecs
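# Note: the result is a 2D float32 array of shape (len(reviews), num_features), i.e. one
# averaged 300-dimensional vector per review with the settings used above.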
# ****************************************************************
# Calculate average feature vectors for training and testing sets,
# using the functions we defined above. Notice that we now use stop word
# removal.
clean_train_reviews = []
for review in train["review"]:
clean_train_reviews.append(review_to_wordlist(review, remove_stopwords=True))
trainDataVecs = getAvgFeatureVecs(clean_train_reviews, model, num_features)
print "Creating average feature vecs for test reviews"
clean_test_reviews = []
for review in test["review"]:
clean_test_reviews.append(review_to_wordlist(review, remove_stopwords=True))
testDataVecs = getAvgFeatureVecs(clean_test_reviews, model, num_features)
# Fit a random forest to the training data, using 100 trees
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier( n_estimators = 100 )
print "Fitting a random forest to labeled training data..."
forest = forest.fit( trainDataVecs, train["sentiment"] )
# Test & extract results
result = forest.predict( testDataVecs )
# Write the test results
output = pd.DataFrame( data={"id":test["id"], "sentiment":result} )
output.to_csv( "Word2Vec_AverageVectors.csv", index=False, quoting=3 )
print "Done!"
from sklearn.cluster import KMeans
import time
start = time.time() # Start time
# Set "k" (num_clusters) to be 1/5th of the vocabulary size, or an
# average of 5 words per cluster
word_vectors = model.syn0
num_clusters = word_vectors.shape[0] / 5
# Initalize a k-means object and use it to extract centroids
kmeans_clustering = KMeans( n_clusters = num_clusters )
idx = kmeans_clustering.fit_predict( word_vectors )
# Get the end time and print how long the process took
end = time.time()
elapsed = end - start
print "Time taken for K Means clustering: ", elapsed, "seconds."
# Create a Word / Index dictionary, mapping each vocabulary word to
# a cluster number
word_centroid_map = dict(zip( model.index2word, idx ))
# For the first 10 clusters
for cluster in xrange(0,10):
#
# Print the cluster number
print "\nCluster %d" % cluster
#
# Find all of the words for that cluster number, and print them out
words = []
    for word, cluster_id in word_centroid_map.items():
        if cluster_id == cluster:
            words.append(word)
print words
def create_bag_of_centroids( wordlist, word_centroid_map ):
#
# The number of clusters is equal to the highest cluster index
# in the word / centroid map
num_centroids = max( word_centroid_map.values() ) + 1
#
# Pre-allocate the bag of centroids vector (for speed)
bag_of_centroids = np.zeros( num_centroids, dtype="float32" )
#
# Loop over the words in the review. If the word is in the vocabulary,
# find which cluster it belongs to, and increment that cluster count
# by one
for word in wordlist:
if word in word_centroid_map:
index = word_centroid_map[word]
bag_of_centroids[index] += 1
#
# Return the "bag of centroids"
return bag_of_centroids
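# For example (illustrative): a review whose words fall into clusters [0, 0, 2] becomes the
# vector [2., 0., 1., 0., ...], with one slot per cluster in the word / centroid map.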
# Pre-allocate an array for the training set bags of centroids (for speed)
train_centroids = np.zeros( (train["review"].size, num_clusters), \
dtype="float32" )
# Transform the training set reviews into bags of centroids
counter = 0
for review in clean_train_reviews:
train_centroids[counter] = create_bag_of_centroids( review, \
word_centroid_map )
counter += 1
# Repeat for test reviews
test_centroids = np.zeros(( test["review"].size, num_clusters), \
dtype="float32" )
counter = 0
for review in clean_test_reviews:
test_centroids[counter] = create_bag_of_centroids( review, \
word_centroid_map )
counter += 1
# Fit a random forest and extract predictions
forest = RandomForestClassifier(n_estimators = 100)
# Fitting the forest may take a few minutes
print "Fitting a random forest to labeled training data..."
start = time.time()
forest = forest.fit(train_centroids,train["sentiment"])
result = forest.predict(test_centroids)
# Write the test results
output = pd.DataFrame(data={"id":test["id"], "sentiment":result})
output.to_csv( "BagOfCentroids.csv", index=False, quoting=3 )
end = time.time()
elapsed = end - start
print "Done in ", elapsed, " seconds."
| gpl-3.0 |
abdulbaqi/ThinkStats2 | code/density.py | 67 | 2934 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import random
import brfss
import first
import thinkstats2
import thinkplot
def Summarize(data):
"""Prints summary statistics.
data: pandas Series
"""
mean = data.mean()
std = data.std()
median = thinkstats2.Median(data)
print('mean', mean)
print('std', std)
print('median', median)
print('skewness', thinkstats2.Skewness(data))
print('pearson skewness',
thinkstats2.PearsonMedianSkewness(data))
return mean, median
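# Note: Pearson's median skewness is conventionally 3 * (mean - median) / std, so its sign
# indicates whether the mean sits above or below the median (positive for right-skewed data).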
def ComputeSkewnesses():
"""Plots KDE of birthweight and adult weight.
"""
def VertLine(x, y):
thinkplot.Plot([x, x], [0, y], color='0.6', linewidth=1)
live, firsts, others = first.MakeFrames()
data = live.totalwgt_lb.dropna()
print('Birth weight')
mean, median = Summarize(data)
y = 0.35
VertLine(mean, y)
thinkplot.Text(mean-0.15, 0.1*y, 'mean', horizontalalignment='right')
VertLine(median, y)
thinkplot.Text(median+0.1, 0.1*y, 'median', horizontalalignment='left')
pdf = thinkstats2.EstimatedPdf(data)
thinkplot.Pdf(pdf, label='birth weight')
thinkplot.Save(root='density_totalwgt_kde',
xlabel='lbs',
ylabel='PDF')
df = brfss.ReadBrfss(nrows=None)
data = df.wtkg2.dropna()
print('Adult weight')
mean, median = Summarize(data)
y = 0.02499
VertLine(mean, y)
thinkplot.Text(mean+1, 0.1*y, 'mean', horizontalalignment='left')
VertLine(median, y)
thinkplot.Text(median-1.5, 0.1*y, 'median', horizontalalignment='right')
pdf = thinkstats2.EstimatedPdf(data)
thinkplot.Pdf(pdf, label='adult weight')
thinkplot.Save(root='density_wtkg2_kde',
xlabel='kg',
ylabel='PDF',
xlim=[0, 200])
def MakePdfExample(n=500):
"""Plots a normal density function and a KDE estimate.
n: sample size
"""
# mean and var of women's heights in cm, from the BRFSS
mean, var = 163, 52.8
std = math.sqrt(var)
# make a PDF and compute a density, FWIW
pdf = thinkstats2.NormalPdf(mean, std)
print(pdf.Density(mean + std))
# make a PMF and plot it
thinkplot.PrePlot(2)
thinkplot.Pdf(pdf, label='normal')
# make a sample, make an estimated PDF, and plot it
sample = [random.gauss(mean, std) for _ in range(n)]
sample_pdf = thinkstats2.EstimatedPdf(sample)
thinkplot.Pdf(sample_pdf, label='sample KDE')
thinkplot.Save(root='pdf_example',
xlabel='Height (cm)',
ylabel='Density')
def main():
thinkstats2.RandomSeed(17)
MakePdfExample()
ComputeSkewnesses()
if __name__ == '__main__':
main()
| gpl-3.0 |
nonsk131/USRP2016 | generate_tests6000-7000.py | 1 | 3759 | from isochrones.dartmouth import Dartmouth_Isochrone
from isochrones.utils import addmags
import numpy as np
import pandas as pd
file = open('/tigress/np5/true_params.txt','a')
for n in range(6000,7000,1):
index = str(n)
file.write('test: ' + index + '\n')
dar = Dartmouth_Isochrone()
array = np.random.rand(2) + 0.5
if array[0] > array[1]:
M1 = array[1]
M2 = array[0]
else:
M1 = array[0]
M2 = array[1]
feh1,feh2 = 0.7*np.random.rand(2) - 0.5
random1, random2 = np.random.rand(2)
age_low, age_high = dar.agerange(M1,feh1)
age_low = max(age_low, dar.minage)
age_high = min(age_high, dar.maxage)
age_low = 10**age_low
age_high = 10**age_high
age1 = random1*(age_high - age_low) + age_low
age1 = np.log10(age1)
age_low, age_high = dar.agerange(M2,feh2)
age_low = max(age_low, dar.minage)
age_high = min(age_high, dar.maxage)
age_low = 10**age_low
age_high = 10**age_high
age2 = random2*(age_high - age_low) + age_low
age2 = np.log10(age2)
array = 1400*np.random.rand(2) + 100
if array[0] > array[1]:
distance1 = array[1]
distance2 = array[0]
else:
distance1 = array[0]
distance2 = array[1]
AV1, AV2 = 0.5*np.random.rand(2)+0.5
AV1 = AV1 * distance1/1000.0
AV2 = AV2 * distance2/1000.0
params = (M1,M2,age1,age2,feh1,feh2,distance1,distance2,AV1,AV2)
#Simulate true magnitudes
unresolved_bands = ['J','H','K']
resolved_bands = ['i','K']
args1 = (age1, feh1, distance1, AV1)
args2 = (age2, feh2, distance2, AV2)
unresolved = {b:addmags(dar.mag[b](M1, *args1), dar.mag[b](M2, *args2)) for b in unresolved_bands}
resolved_1 = {b:dar.mag[b](M1, *args1) for b in resolved_bands}
resolved_2 = {b:dar.mag[b](M2, *args2) for b in resolved_bands}
params = str(params)
file.write('(M1,M2,age1,age2,feh1,feh2,distance1,distance2,AV1,AV2) = ' + params + '\n')
file.write('\n')
#print dar.mag['K'](M2, *args2)
#print unresolved, resolved_1, resolved_2
instruments = ['twomass','RAO']
bands = {'twomass':['J','H','K'],
'RAO':['i','K']}
mag_unc = {'twomass': 0.02, 'RAO':0.04}
resolution = {'twomass':4.0, 'RAO':0.1}
relative = {'twomass':False, 'RAO':True}
separation = 0.5
PA = 100.
columns = ['name', 'band', 'resolution', 'relative', 'separation', 'pa', 'mag', 'e_mag']
df = pd.DataFrame(columns=columns)
i=0
for inst in ['twomass']: #Unresolved observations
for b in bands[inst]:
row = {}
row['name'] = inst
row['band'] = b
row['resolution'] = resolution[inst]
row['relative'] = relative[inst]
row['separation'] = 0.
row['pa'] = 0.
row['mag'] = unresolved[b]
row['e_mag'] = mag_unc[inst]
df = df.append(pd.DataFrame(row, index=[i]))
i += 1
for inst in ['RAO']: #Resolved observations
for b in bands[inst]:
mags = [resolved_1[b], resolved_2[b]]
pas = [0, PA]
seps = [0., separation]
for mag,sep,pa in zip(mags,seps,pas):
row = {}
row['name'] = inst
row['band'] = b
row['resolution'] = resolution[inst]
row['relative'] = relative[inst]
row['separation'] = sep
row['pa'] = pa
row['mag'] = mag
row['e_mag'] = mag_unc[inst]
df = df.append(pd.DataFrame(row, index=[i]))
i += 1
#print df
df.to_csv(path_or_buf='/tigress/np5/dataFrame/df_binary_test{}.csv'.format(index))
file.close()
| mit |
emeb/iceRadio | FPGA/rxadc_2/python/tst_ddc.py | 1 | 1392 | #!/usr/bin/python3
#
# Digital DownConverter testbench
#
# 07-23-2015 E. Brombaugh
# Test out the DDC
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy.signal as signal
from scipy.fftpack import fft, ifft, fftfreq, fftshift
from ddc import ddc
# generate a signal
data_len = 2**18
Fs = 20e6
Ft = 7.125e6
data_bits = 10
data_scl = 2**(data_bits-1)-1
t = np.arange(data_len)/Fs
data_in = np.floor(data_scl/2 * (np.sin(2*np.pi*(Ft+1000)*t) + np.sin(2*np.pi*(Ft+25000)*t)) + 0.5)
# init the model
uut = ddc(data_bits)
uut.set_ftune(Ft/Fs)
# run the ddc
ddc_out = uut.calc(data_in)
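# With the NCO tuned to Ft, the two test tones at Ft+1 kHz and Ft+25 kHz should land near
# 1 kHz and 25 kHz in the decimated baseband spectrum plotted below.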
# prepare to plot
data = ddc_out
rate = Fs/(uut.cic_i_inst.dec_rate*4)
data_len = len(data)
t = np.arange(data_len)/rate
# plot of time
fig = plt.figure(1)
plt.plot(t, np.real(data), label="real")
plt.plot(t, np.imag(data), label="imag")
plt.grid()
plt.xlabel("Time")
plt.ylabel("data")
plt.title("sinusoid - time")
plt.legend()
# plot of frequency
fig = plt.figure(2)
f = rate * fftshift(fftfreq(data_len))/1e3
win = signal.blackmanharris(data_len)
data_bhwin = data * win
bh_gain = sum(win)/data_len
data_dB = 20*np.log10(np.abs(fftshift(fft(data_bhwin)))/
(data_len*(data_scl/2)*bh_gain))
plt.plot(f, data_dB)
plt.grid()
plt.xlabel("Frequency (kHz)")
plt.ylabel("dB")
plt.title("sinusoid - freq")
#plt.xlim((0, (rate/1e3)/2))
plt.show()
| mit |
Jessime/pokemon_battle | App.py | 1 | 4662 | import os
import numpy as np
import pandas as pd
from webbrowser import open_new_tab
from flask import (Flask, render_template, redirect, request, jsonify,
                   flash, send_from_directory)
from flaskext.mysql import MySQL
from battle import Simulation
app = Flask(__name__)
############################ Data #######################
class Data:
stats = pd.read_csv('Pokemon.csv').groupby('#').first()
username = None
password = None
t1 = None
t2 = None
class DB:
def __init__(self, connection):
self.connection = connection
    def check_un(self, username):
        cursor = self.connection.cursor()
        cursor.execute(f"SELECT * FROM users WHERE username='{username}'")
        result = cursor.fetchone()
        cursor.close()
        return result
    def check_un_pw(self, username, password):
        cursor = self.connection.cursor()
        cursor.execute(f"SELECT * FROM users WHERE username='{username}' AND password='{password}'")
        result = cursor.fetchone()
        cursor.close()
        return result
    def signUp(self, username, password):
        cursor = self.connection.cursor()
        query = f"INSERT INTO users (username, password, wins, losses) VALUES ('{username}', '{password}', 0, 0)"
        cursor.execute(query)
        self.connection.commit()
        cursor.close()
    def updateTable(self, res, username):
        cursor = self.connection.cursor()
        if res:
            cursor.execute(f"UPDATE users SET wins=wins+1 WHERE username='{username}'")
        else:
            cursor.execute(f"UPDATE users SET losses=losses+1 WHERE username='{username}'")
        self.connection.commit()
        cursor.close()
    def get_wins_losses(self, username):
        cursor = self.connection.cursor()
        cursor.execute(f"SELECT wins, losses FROM users WHERE username='{username}'")
        result = cursor.fetchone()
        cursor.close()
        return result
app.config['MYSQL_DATABASE_USER'] = os.environ['MYSQL_USER']
app.config['MYSQL_DATABASE_PASSWORD'] = os.environ['MYSQL_PASSWORD']
app.config['MYSQL_DATABASE_HOST'] = os.environ['MYSQL_HOST']
app.config['MYSQL_DATABASE_DB'] = os.environ['MYSQL_DB']
mysql = MySQL()
mysql.init_app(app)
conn = mysql.connect()
db = DB(conn)
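# Note: the queries in DB assume a MySQL `users` table with columns
# (username, password, wins, losses); the exact column types are not defined here.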
#########################################################
def choose_teams():
t1 = np.random.choice(range(1, 722), size=5, replace=False)
t2 = np.random.choice(range(1, 722), size=5, replace=False)
t1 = Data.stats.loc[t1]
t2 = Data.stats.loc[t2]
return t1, t2
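# choose_teams() returns two 5-row DataFrames of Pokemon stats, each sampled without
# replacement from Pokedex numbers 1-721 (the two teams may still share a Pokemon).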
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
Data.username = request.form['username']
Data.password = request.form['password']
data = db.check_un_pw(Data.username, Data.password)
if data is None:
error = 'Invalid username or password. Please try again!'
return render_template('login.html', error=error)
else:
return redirect('battle')
return render_template('login.html')
@app.route('/signup', methods=['GET', 'POST'])
def signup():
if request.method == 'POST':
Data.username = request.form['username']
Data.password = request.form['password']
data = db.check_un(Data.username)
if data is not None:
error = ' username has already been taken!'
return render_template('signup.html', error=error)
else:
db.signUp(Data.username, Data.password)
Data.t1, Data.t2 = choose_teams()
return render_template('battle.html', t1=Data.t1, t2=Data.t2)
return render_template('signup.html')
@app.route('/battle', methods=['GET', 'POST'])
def battle():
if request.method == 'POST':
guess = 0 if request.form['guess'] == 'team1' else 1
result = Simulation(Data.t1.index.values, Data.t2.index.values, guess).run()
print('Guess:', guess)
print('result:', result)
db.updateTable(result, Data.username)
Data.t1, Data.t2 = choose_teams()
wins, losses = db.get_wins_losses(Data.username)
return render_template('battle.html',
t1=Data.t1,
t2=Data.t2,
username=Data.username,
wins=wins,
losses=losses)
@app.route('/')
def index():
return render_template('index.html')
if __name__ == "__main__":
#url = 'http://127.0.0.1:5001'
#open_new_tab(url)
#app.run(debug=True)
app.debug = False
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
| mit |
rabitt/motif | motif/plot.py | 1 | 2442 | # -*- coding: utf-8 -*-
""" Plotting helper functions
"""
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set()
from .utils import load_annotation
def plot_with_annotation(ctr, annotation_fpath, single_f0=True):
'''Plot all contours in a single color against an annotation.
Useful for viewing contour coverage/accuracy.
Parameters
----------
ctr : Contours
An instance of a Contours object
annotation_fpath : str
Path to an annotation file.
single_f0 : bool
If True, assumes the annotation contains one pitch at a time.
If False, assumes there may be multiple ground truth pitches.
'''
if single_f0:
ref_times, ref_freqs = load_annotation(
annotation_fpath, n_freqs=1, to_array=False, rm_zeros=True
)
# ref_freqs = [f if f[0] != 0 else np.array([]) for f in ref_freqs]
else:
ref_times, ref_freqs = load_annotation(
annotation_fpath, n_freqs=None, to_array=False, rm_zeros=True
)
r_times = []
r_freqs = []
for t, freq in zip(ref_times, ref_freqs):
r_times.extend([t for f in freq])
r_freqs.extend([f for f in freq])
# plot annotation
plt.semilogy(
r_times, r_freqs, 'ok', basey=2, markersize=6
)
# plot contours
c1 = sns.color_palette('bright', 4)[2]
for i in ctr.nums:
plt.semilogy(ctr.contour_times(i), ctr.contour_freqs(i),
basey=2, markersize=2, color=c1)
plt.xlabel('Time (sec)')
plt.ylabel('Frequency (Hz)')
plt.axis('tight')
def plot_contours(ctr, style='contour'):
'''Plot contours.
Parameters
----------
ctr : Contours
An instance of a Contours object
style : str
One of:
- 'contour': plot each extracted contour, where each contour
gets its own color.
- 'salience': plot the contours where the colors denote the
salience.
'''
if style == 'contour':
for i in ctr.nums:
plt.plot(ctr.contour_times(i), ctr.contour_freqs(i))
elif style == 'salience':
plt.scatter(
ctr.times, ctr.freqs,
c=(ctr.salience / np.max(ctr.salience)), cmap='BuGn',
edgecolors='face', marker='.'
)
plt.colorbar()
plt.xlabel('Time (sec)')
plt.ylabel('Frequency (Hz)')
plt.axis('tight')
| mit |
q1ang/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
nonhermitian/scipy | scipy/signal/_max_len_seq.py | 41 | 4942 | # Author: Eric Larson
# 2014
"""Tools for MLS generation"""
import numpy as np
from ._max_len_seq_inner import _max_len_seq_inner
__all__ = ['max_len_seq']
# These are definitions of linear shift register taps for use in max_len_seq()
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
31: [28], 32: [31, 30, 10]}
def max_len_seq(nbits, state=None, length=None, taps=None):
"""
Maximum length sequence (MLS) generator.
Parameters
----------
nbits : int
Number of bits to use. Length of the resulting sequence will
be ``(2**nbits) - 1``. Note that generating long sequences
(e.g., greater than ``nbits == 16``) can take a long time.
state : array_like, optional
If array, must be of length ``nbits``, and will be cast to binary
(bool) representation. If None, a seed of ones will be used,
producing a repeatable representation. If ``state`` is all
zeros, an error is raised as this is invalid. Default: None.
length : int, optional
Number of samples to compute. If None, the entire length
``(2**nbits) - 1`` is computed.
taps : array_like, optional
Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
If None, taps will be automatically selected (for up to
``nbits == 32``).
Returns
-------
seq : array
Resulting MLS sequence of 0's and 1's.
state : array
The final state of the shift register.
Notes
-----
The algorithm for MLS generation is generically described in:
https://en.wikipedia.org/wiki/Maximum_length_sequence
The default values for taps are specifically taken from the first
option listed for each value of ``nbits`` in:
http://www.newwaveinstruments.com/resources/articles/
m_sequence_linear_feedback_shift_register_lfsr.htm
.. versionadded:: 0.15.0
Examples
--------
MLS uses binary convention:
>>> from scipy.signal import max_len_seq
>>> max_len_seq(4)[0]
array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
MLS has a white spectrum (except for DC):
>>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, ifft, fftshift, fftfreq
>>> seq = max_len_seq(6)[0]*2-1 # +1 and -1
>>> spec = fft(seq)
>>> N = len(seq)
>>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Circular autocorrelation of MLS is an impulse:
>>> acorrcirc = ifft(spec * np.conj(spec)).real
>>> plt.figure()
>>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Linear autocorrelation of MLS is approximately an impulse:
>>> acorr = np.correlate(seq, seq, 'full')
>>> plt.figure()
>>> plt.plot(np.arange(-N+1, N), acorr, '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
"""
if taps is None:
if nbits not in _mls_taps:
known_taps = np.array(list(_mls_taps.keys()))
raise ValueError('nbits must be between %s and %s if taps is None'
% (known_taps.min(), known_taps.max()))
taps = np.array(_mls_taps[nbits], np.intp)
else:
taps = np.unique(np.array(taps, np.intp))[::-1]
if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
raise ValueError('taps must be non-empty with values between '
'zero and nbits (inclusive)')
taps = np.ascontiguousarray(taps) # needed for Cython
n_max = (2**nbits) - 1
if length is None:
length = n_max
else:
length = int(length)
if length < 0:
raise ValueError('length must be greater than or equal to 0')
# We use int8 instead of bool here because numpy arrays of bools
# don't seem to work nicely with Cython
if state is None:
state = np.ones(nbits, dtype=np.int8, order='c')
else:
# makes a copy if need be, ensuring it's 0's and 1's
state = np.array(state, dtype=bool, order='c').astype(np.int8)
if state.ndim != 1 or state.size != nbits:
raise ValueError('state must be a 1-dimensional array of size nbits')
if np.all(state == 0):
raise ValueError('state must not be all zeros')
seq = np.empty(length, dtype=np.int8, order='c')
state = _max_len_seq_inner(taps, state, nbits, length, seq)
return seq, state
| bsd-3-clause |
vortex-ape/scikit-learn | benchmarks/bench_plot_ward.py | 117 | 1283 | """
Benchmark scikit-learn's Ward implement compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
        X = np.random.normal(size=(int(n), int(p)))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
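# ratio > 1 means scikit-learn's Ward took longer than SciPy's for that
# (n_samples, n_features) combination; the contour below marks the break-even line.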
plt.figure("scikit-learn Ward's method benchmark results")
plt.imshow(np.log(ratio), aspect='auto', origin="lower")
plt.colorbar()
plt.contour(ratio, levels=[1, ], colors='k')
plt.yticks(range(len(n_features)), n_features.astype(np.int))
plt.ylabel('N features')
plt.xticks(range(len(n_samples)), n_samples.astype(np.int))
plt.xlabel('N samples')
plt.title("Scikit's time, in units of scipy time (log)")
plt.show()
| bsd-3-clause |
PythonCharmers/python-future | tests/test_future/test_futurize.py | 1 | 43360 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import pprint
import tempfile
from subprocess import Popen, PIPE
import os
from libfuturize.fixer_util import is_shebang_comment, is_encoding_comment
from lib2to3.fixer_util import FromImport
from lib2to3.pytree import Leaf, Node
from lib2to3.pygram import token
from future.tests.base import (CodeHandler, unittest, skip26, reformat_code,
order_future_lines, expectedFailurePY26)
from future.utils import PY2
class TestLibFuturize(unittest.TestCase):
def setUp(self):
# For tests that need a text file:
_, self.textfilename = tempfile.mkstemp(text=True)
super(TestLibFuturize, self).setUp()
def tearDown(self):
os.unlink(self.textfilename)
def test_correct_exit_status(self):
"""
Issue #119: futurize and pasteurize were not exiting with the correct
status code. This is because the status code returned from
libfuturize.main.main() etc. was a ``newint``, which sys.exit() always
translates into 1!
"""
from libfuturize.main import main
retcode = main([self.textfilename])
self.assertTrue(isinstance(retcode, int)) # i.e. Py2 builtin int
def test_is_shebang_comment(self):
"""
Tests whether the fixer_util.is_encoding_comment() function is working.
"""
shebang_comments = [u'#!/usr/bin/env python\n'
u"#!/usr/bin/python2\n",
u"#! /usr/bin/python3\n",
]
not_shebang_comments = [u"# I saw a giant python\n",
u"# I have never seen a python2\n",
]
for comment in shebang_comments:
node = FromImport(u'math', [Leaf(token.NAME, u'cos', prefix=" ")])
node.prefix = comment
self.assertTrue(is_shebang_comment(node))
for comment in not_shebang_comments:
node = FromImport(u'math', [Leaf(token.NAME, u'cos', prefix=" ")])
node.prefix = comment
self.assertFalse(is_shebang_comment(node))
def test_is_encoding_comment(self):
"""
Tests whether the fixer_util.is_encoding_comment() function is working.
"""
encoding_comments = [u"# coding: utf-8",
u"# encoding: utf-8",
u"# -*- coding: latin-1 -*-",
u"# vim: set fileencoding=iso-8859-15 :",
]
not_encoding_comments = [u"# We use the file encoding utf-8",
u"coding = 'utf-8'",
u"encoding = 'utf-8'",
]
for comment in encoding_comments:
node = FromImport(u'math', [Leaf(token.NAME, u'cos', prefix=" ")])
node.prefix = comment
self.assertTrue(is_encoding_comment(node))
for comment in not_encoding_comments:
node = FromImport(u'math', [Leaf(token.NAME, u'cos', prefix=" ")])
node.prefix = comment
self.assertFalse(is_encoding_comment(node))
class TestFuturizeSimple(CodeHandler):
"""
This class contains snippets of Python 2 code (invalid Python 3) and
tests for whether they can be passed to ``futurize`` and immediately
run under both Python 2 again and Python 3.
"""
def test_encoding_comments_kept_at_top(self):
"""
Issues #10 and #97: If there is a source encoding comment line
(PEP 263), is it kept at the top of a module by ``futurize``?
"""
before = """
# coding=utf-8
print 'Hello'
"""
after = """
# coding=utf-8
from __future__ import print_function
print('Hello')
"""
self.convert_check(before, after)
before = """
#!/usr/bin/env python
# -*- coding: latin-1 -*-"
print 'Hello'
"""
after = """
#!/usr/bin/env python
# -*- coding: latin-1 -*-"
from __future__ import print_function
print('Hello')
"""
self.convert_check(before, after)
def test_multiline_future_import(self):
"""
Issue #113: don't crash if a future import has multiple lines
"""
text = """
from __future__ import (
division
)
"""
self.convert(text)
def test_shebang_blank_with_future_division_import(self):
"""
Issue #43: Is shebang line preserved as the first
line by futurize when followed by a blank line?
"""
before = """
#!/usr/bin/env python
import math
1 / 5
"""
after = """
#!/usr/bin/env python
from __future__ import division
from past.utils import old_div
import math
old_div(1, 5)
"""
self.convert_check(before, after)
def test_shebang_blank_with_print_import(self):
before = """
#!/usr/bin/env python
import math
print 'Hello'
"""
after = """
#!/usr/bin/env python
from __future__ import print_function
import math
print('Hello')
"""
self.convert_check(before, after)
def test_shebang_comment(self):
"""
Issue #43: Is shebang line preserved as the first
line by futurize when followed by a comment?
"""
before = """
#!/usr/bin/env python
# some comments
# and more comments
import math
print 'Hello!'
"""
after = """
#!/usr/bin/env python
# some comments
# and more comments
from __future__ import print_function
import math
print('Hello!')
"""
self.convert_check(before, after)
def test_shebang_docstring(self):
"""
Issue #43: Is shebang line preserved as the first
line by futurize when followed by a docstring?
"""
before = '''
#!/usr/bin/env python
"""
a doc string
"""
import math
print 'Hello!'
'''
after = '''
#!/usr/bin/env python
"""
a doc string
"""
from __future__ import print_function
import math
print('Hello!')
'''
self.convert_check(before, after)
def test_oldstyle_classes(self):
"""
Stage 2 should convert old-style to new-style classes. This makes
the new-style class explicit and reduces the gap between the
behaviour (e.g. method resolution order) on Py2 and Py3. It also
allows us to provide ``newobject`` (see
test_oldstyle_classes_iterator).
"""
before = """
class Blah:
pass
"""
after = """
from builtins import object
class Blah(object):
pass
"""
self.convert_check(before, after, ignore_imports=False)
def test_oldstyle_classes_iterator(self):
"""
An old-style class used as an iterator should be converted
properly. This requires ``futurize`` to do both steps (adding
inheritance from object and adding the newobject import) in the
right order. Any next() method should also be renamed to __next__.
"""
before = """
class Upper:
def __init__(self, iterable):
self._iter = iter(iterable)
def next(self):
return next(self._iter).upper()
def __iter__(self):
return self
assert list(Upper('hello')) == list('HELLO')
"""
after = """
from builtins import next
from builtins import object
class Upper(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def __next__(self):
return next(self._iter).upper()
def __iter__(self):
return self
assert list(Upper('hello')) == list('HELLO')
"""
self.convert_check(before, after, ignore_imports=False)
# Try it again with this convention: class Upper():
before2 = """
class Upper():
def __init__(self, iterable):
self._iter = iter(iterable)
def next(self):
return next(self._iter).upper()
def __iter__(self):
return self
assert list(Upper('hello')) == list('HELLO')
"""
self.convert_check(before2, after)
@unittest.expectedFailure
def test_problematic_string(self):
""" This string generates a SyntaxError on Python 3 unless it has
an r prefix.
"""
before = r"""
s = 'The folder is "C:\Users"'.
"""
after = r"""
s = r'The folder is "C:\Users"'.
"""
self.convert_check(before, after)
@unittest.skip('--tobytes feature removed for now ...')
def test_tobytes(self):
"""
The --tobytes option converts all UNADORNED string literals 'abcd' to b'abcd'.
It does apply to multi-line strings but doesn't apply if it's a raw
string, because ur'abcd' is a SyntaxError on Python 2 and br'abcd' is a
SyntaxError on Python 3.
"""
before = r"""
s0 = '1234'
s1 = '''5678
'''
s2 = "9abc"
# Unchanged:
s3 = r'1234'
s4 = R"defg"
s5 = u'hijk'
s6 = u"lmno"
s7 = b'lmno'
s8 = b"pqrs"
"""
after = r"""
s0 = b'1234'
s1 = b'''5678
'''
s2 = b"9abc"
# Unchanged:
s3 = r'1234'
s4 = R"defg"
s5 = u'hijk'
s6 = u"lmno"
s7 = b'lmno'
s8 = b"pqrs"
"""
self.convert_check(before, after, tobytes=True)
def test_cmp(self):
before = """
assert cmp(1, 2) == -1
assert cmp(2, 1) == 1
"""
after = """
from past.builtins import cmp
assert cmp(1, 2) == -1
assert cmp(2, 1) == 1
"""
self.convert_check(before, after, stages=(1, 2), ignore_imports=False)
def test_execfile(self):
before = """
with open('mytempfile.py', 'w') as f:
f.write('x = 1')
execfile('mytempfile.py')
x += 1
assert x == 2
"""
after = """
from past.builtins import execfile
with open('mytempfile.py', 'w') as f:
f.write('x = 1')
execfile('mytempfile.py')
x += 1
assert x == 2
"""
self.convert_check(before, after, stages=(1, 2), ignore_imports=False)
@unittest.expectedFailure
def test_izip(self):
before = """
from itertools import izip
for (a, b) in izip([1, 3, 5], [2, 4, 6]):
pass
"""
after = """
from builtins import zip
for (a, b) in zip([1, 3, 5], [2, 4, 6]):
pass
"""
self.convert_check(before, after, stages=(1, 2), ignore_imports=False)
def test_UserList(self):
before = """
from UserList import UserList
a = UserList([1, 3, 5])
assert len(a) == 3
"""
after = """
from collections import UserList
a = UserList([1, 3, 5])
assert len(a) == 3
"""
self.convert_check(before, after, stages=(1, 2), ignore_imports=True)
@unittest.expectedFailure
def test_no_unneeded_list_calls(self):
"""
TODO: get this working
"""
code = """
for (a, b) in zip(range(3), range(3, 6)):
pass
"""
self.unchanged(code)
@expectedFailurePY26
def test_import_builtins(self):
before = """
a = raw_input()
b = open(a, b, c)
c = filter(a, b)
d = map(a, b)
e = isinstance(a, str)
f = bytes(a, encoding='utf-8')
for g in xrange(10**10):
pass
h = reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])
super(MyClass, self)
"""
after = """
from builtins import bytes
from builtins import filter
from builtins import input
from builtins import map
from builtins import range
from functools import reduce
a = input()
b = open(a, b, c)
c = list(filter(a, b))
d = list(map(a, b))
e = isinstance(a, str)
f = bytes(a, encoding='utf-8')
for g in range(10**10):
pass
h = reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])
super(MyClass, self)
"""
self.convert_check(before, after, ignore_imports=False, run=False)
@expectedFailurePY26
def test_input_without_import(self):
before = """
a = input()
"""
after = """
from builtins import input
a = eval(input())
"""
self.convert_check(before, after, ignore_imports=False, run=False)
def test_input_with_import(self):
before = """
from builtins import input
a = input()
"""
after = """
from builtins import input
a = input()
"""
self.convert_check(before, after, ignore_imports=False, run=False)
def test_xrange(self):
"""
The ``from builtins import range`` line was being added to the
bottom of the file as of v0.11.4, but only using Py2.7's lib2to3.
(Py3.3's lib2to3 seems to work.)
"""
before = """
for i in xrange(10):
pass
"""
after = """
from builtins import range
for i in range(10):
pass
"""
self.convert_check(before, after, ignore_imports=False)
def test_source_coding_utf8(self):
"""
Tests to ensure that the source coding line is not corrupted or
removed. It must be left as the first line in the file (including
before any __future__ imports). Also tests whether the unicode
characters in this encoding are parsed correctly and left alone.
"""
code = """
# -*- coding: utf-8 -*-
icons = [u"◐", u"◓", u"◑", u"◒"]
"""
def test_exception_syntax(self):
"""
Test of whether futurize handles the old-style exception syntax
"""
before = """
try:
pass
except IOError, e:
val = e.errno
"""
after = """
try:
pass
except IOError as e:
val = e.errno
"""
self.convert_check(before, after)
def test_super(self):
"""
This tests whether futurize keeps the old two-argument super() calls the
same as before. It should, because this still works in Py3.
"""
code = '''
class VerboseList(list):
def append(self, item):
print('Adding an item')
super(VerboseList, self).append(item)
'''
self.unchanged(code)
@unittest.expectedFailure
def test_file(self):
"""
file() as a synonym for open() is obsolete and invalid on Python 3.
"""
before = '''
f = file(self.textfilename)
data = f.read()
f.close()
'''
after = '''
f = open(__file__)
data = f.read()
f.close()
'''
self.convert_check(before, after)
def test_apply(self):
before = '''
def addup(*x):
return sum(x)
assert apply(addup, (10,20)) == 30
'''
after = """
def addup(*x):
return sum(x)
assert addup(*(10,20)) == 30
"""
self.convert_check(before, after)
@unittest.skip('not implemented yet')
def test_download_pypi_package_and_test(self):
URL = 'http://pypi.python.org/pypi/{0}/json'
import requests
package = 'future'
r = requests.get(URL.format(package))
pprint.pprint(r.json())
download_url = r.json()['urls'][0]['url']
filename = r.json()['urls'][0]['filename']
# r2 = requests.get(download_url)
# with open('/tmp/' + filename, 'w') as tarball:
# tarball.write(r2.content)
@expectedFailurePY26
def test_raw_input(self):
"""
Passes in a string to the waiting input() after futurize
conversion.
The code is the first snippet from these docs:
http://docs.python.org/2/library/2to3.html
"""
before = """
from io import BytesIO
def greet(name):
print "Hello, {0}!".format(name)
print "What's your name?"
import sys
oldstdin = sys.stdin
sys.stdin = BytesIO(b'Ed\\n')
name = raw_input()
greet(name.decode())
sys.stdin = oldstdin
assert name == b'Ed'
"""
desired = """
from io import BytesIO
def greet(name):
print("Hello, {0}!".format(name))
print("What's your name?")
import sys
oldstdin = sys.stdin
sys.stdin = BytesIO(b'Ed\\n')
name = input()
greet(name.decode())
sys.stdin = oldstdin
assert name == b'Ed'
"""
self.convert_check(before, desired, run=False)
for interpreter in self.interpreters:
p1 = Popen([interpreter, self.tempdir + 'mytestscript.py'],
stdout=PIPE, stdin=PIPE, stderr=PIPE)
(stdout, stderr) = p1.communicate(b'Ed')
self.assertEqual(stderr, b'')
self.assertEqual(stdout, b"What's your name?\nHello, Ed!\n")
def test_literal_prefixes_are_not_stripped(self):
"""
Tests to ensure that the u'' and b'' prefixes on unicode strings and
byte strings are not removed by the futurize script. Removing the
prefixes on Py3.3+ is unnecessary and loses some information -- namely,
that the strings have explicitly been marked as unicode or bytes,
rather than just e.g. a guess by some automated tool about what they
are.
"""
code = '''
s = u'unicode string'
b = b'byte string'
'''
self.unchanged(code)
def test_division(self):
before = """
x = 1 / 2
"""
after = """
from past.utils import old_div
x = old_div(1, 2)
"""
self.convert_check(before, after, stages=[1, 2])
def test_already_future_division(self):
code = """
from __future__ import division
x = 1 / 2
assert x == 0.5
y = 3. / 2.
assert y == 1.5
"""
self.unchanged(code)
class TestFuturizeRenamedStdlib(CodeHandler):
@unittest.skip('Infinite loop?')
def test_renamed_modules(self):
before = """
import ConfigParser
import copy_reg
import cPickle
import cStringIO
"""
after = """
import configparser
import copyreg
import pickle
import io
"""
# We can't run the converted code because configparser may
# not be there.
self.convert_check(before, after, run=False)
@unittest.skip('Not working yet ...')
def test_urllib_refactor(self):
# Code like this using urllib is refactored by futurize --stage2 to use
# the new Py3 module names, but ``future`` doesn't support urllib yet.
before = """
import urllib
URL = 'http://pypi.python.org/pypi/future/json'
package = 'future'
r = urllib.urlopen(URL.format(package))
data = r.read()
"""
after = """
from future import standard_library
standard_library.install_aliases()
import urllib.request
URL = 'http://pypi.python.org/pypi/future/json'
package = 'future'
r = urllib.request.urlopen(URL.format(package))
data = r.read()
"""
self.convert_check(before, after)
@unittest.skip('Infinite loop?')
def test_renamed_copy_reg_and_cPickle_modules(self):
"""
Example from docs.python.org/2/library/copy_reg.html
"""
before = """
import copy_reg
import copy
import cPickle
class C(object):
def __init__(self, a):
self.a = a
def pickle_c(c):
print('pickling a C instance...')
return C, (c.a,)
copy_reg.pickle(C, pickle_c)
c = C(1)
d = copy.copy(c)
p = cPickle.dumps(c)
"""
after = """
import copyreg
import copy
import pickle
class C(object):
def __init__(self, a):
self.a = a
def pickle_c(c):
print('pickling a C instance...')
return C, (c.a,)
copyreg.pickle(C, pickle_c)
c = C(1)
d = copy.copy(c)
p = pickle.dumps(c)
"""
self.convert_check(before, after)
@unittest.expectedFailure
def test_Py2_StringIO_module(self):
"""
This requires that the argument to io.StringIO be made a
unicode string explicitly if we're not using unicode_literals:
Ideally, there would be a fixer for this. For now:
TODO: add the Py3 equivalent for this to the docs. Also add back
a test for the unicode_literals case.
"""
before = """
import cStringIO
import StringIO
s1 = cStringIO.StringIO('my string')
s2 = StringIO.StringIO('my other string')
assert isinstance(s1, cStringIO.InputType)
"""
# There is no io.InputType in Python 3. futurize should change this to
# something like this. But note that the input to io.StringIO
# must be a unicode string on both Py2 and Py3.
after = """
import io
import io
s1 = io.StringIO(u'my string')
s2 = io.StringIO(u'my other string')
assert isinstance(s1, io.StringIO)
"""
self.convert_check(before, after)
class TestFuturizeStage1(CodeHandler):
"""
Tests "stage 1": safe optimizations: modernizing Python 2 code so that it
uses print functions, new-style exception syntax, etc.
The behaviour should not change and this should introduce no dependency on
the ``future`` package. It produces more modern Python 2-only code. The
goal is to reduce the size of the real porting patch-set by performing
the uncontroversial patches first.
"""
def test_apply(self):
"""
apply() should be changed by futurize --stage1
"""
before = '''
def f(a, b):
return a + b
args = (1, 2)
assert apply(f, args) == 3
assert apply(f, ('a', 'b')) == 'ab'
'''
after = '''
def f(a, b):
return a + b
args = (1, 2)
assert f(*args) == 3
assert f(*('a', 'b')) == 'ab'
'''
self.convert_check(before, after, stages=[1])
def test_next_1(self):
"""
Custom next methods should not be converted to __next__ in stage1, but
any obj.next() calls should be converted to next(obj).
"""
before = """
class Upper:
def __init__(self, iterable):
self._iter = iter(iterable)
def next(self): # note the Py2 interface
return next(self._iter).upper()
def __iter__(self):
return self
itr = Upper('hello')
assert itr.next() == 'H'
assert next(itr) == 'E'
assert list(itr) == list('LLO')
"""
after = """
class Upper:
def __init__(self, iterable):
self._iter = iter(iterable)
def next(self): # note the Py2 interface
return next(self._iter).upper()
def __iter__(self):
return self
itr = Upper('hello')
assert next(itr) == 'H'
assert next(itr) == 'E'
assert list(itr) == list('LLO')
"""
self.convert_check(before, after, stages=[1], run=PY2)
@unittest.expectedFailure
def test_next_2(self):
"""
This version of the above doesn't currently work: the self._iter.next() call in
line 5 isn't converted to next(self._iter).
"""
before = """
class Upper:
def __init__(self, iterable):
self._iter = iter(iterable)
def next(self): # note the Py2 interface
return self._iter.next().upper()
def __iter__(self):
return self
itr = Upper('hello')
assert itr.next() == 'H'
assert next(itr) == 'E'
assert list(itr) == list('LLO')
"""
after = """
class Upper(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def next(self): # note the Py2 interface
return next(self._iter).upper()
def __iter__(self):
return self
itr = Upper('hello')
assert next(itr) == 'H'
assert next(itr) == 'E'
assert list(itr) == list('LLO')
"""
self.convert_check(before, after, stages=[1], run=PY2)
def test_xrange(self):
"""
xrange should not be changed by futurize --stage1
"""
code = '''
for i in xrange(10):
pass
'''
self.unchanged(code, stages=[1], run=PY2)
@unittest.expectedFailure
def test_absolute_import_changes(self):
"""
Implicit relative imports should be converted to absolute or explicit
relative imports correctly.
Issue #16 (with porting bokeh/bbmodel.py)
"""
with open(self.tempdir + 'specialmodels.py', 'w') as f:
f.write('pass')
before = """
import specialmodels.pandasmodel
specialmodels.pandasmodel.blah()
"""
after = """
from __future__ import absolute_import
from .specialmodels import pandasmodel
pandasmodel.blah()
"""
self.convert_check(before, after, stages=[1])
def test_safe_futurize_imports(self):
"""
The standard library module names should not be changed until stage 2
"""
before = """
import ConfigParser
import HTMLParser
from itertools import ifilterfalse
ConfigParser.ConfigParser
HTMLParser.HTMLParser
assert list(ifilterfalse(lambda x: x % 2, [2, 4])) == [2, 4]
"""
self.unchanged(before, stages=[1], run=PY2)
def test_print(self):
before = """
print 'Hello'
"""
after = """
print('Hello')
"""
self.convert_check(before, after, stages=[1])
before = """
import sys
print >> sys.stderr, 'Hello', 'world'
"""
after = """
import sys
print('Hello', 'world', file=sys.stderr)
"""
self.convert_check(before, after, stages=[1])
def test_print_already_function(self):
"""
Running futurize --stage1 should not add a second set of parentheses
"""
before = """
print('Hello')
"""
self.unchanged(before, stages=[1])
@unittest.expectedFailure
def test_print_already_function_complex(self):
"""
Running futurize --stage1 does add a second second set of parentheses
in this case. This is because the underlying lib2to3 has two distinct
grammars -- with a print statement and with a print function -- and,
when going forwards (2 to both), futurize assumes print is a statement,
which raises a ParseError.
"""
before = """
import sys
print('Hello', 'world', file=sys.stderr)
"""
self.unchanged(before, stages=[1])
def test_exceptions(self):
before = """
try:
raise AttributeError('blah')
except AttributeError, e:
pass
"""
after = """
try:
raise AttributeError('blah')
except AttributeError as e:
pass
"""
self.convert_check(before, after, stages=[1])
@unittest.expectedFailure
def test_string_exceptions(self):
"""
2to3 does not convert string exceptions: see
http://python3porting.com/differences.html.
"""
before = """
try:
raise "old string exception"
except Exception, e:
pass
"""
after = """
try:
raise Exception("old string exception")
except Exception as e:
pass
"""
self.convert_check(before, after, stages=[1])
def test_oldstyle_classes(self):
"""
We don't convert old-style classes to new-style automatically in
stage 1 (but we should in stage 2). So Blah should not inherit
explicitly from object yet.
"""
before = """
class Blah:
pass
"""
self.unchanged(before, stages=[1])
def test_stdlib_modules_not_changed(self):
"""
Standard library module names should not be changed in stage 1
"""
before = """
import ConfigParser
import HTMLParser
import collections
print 'Hello'
try:
raise AttributeError('blah')
except AttributeError, e:
pass
"""
after = """
import ConfigParser
import HTMLParser
import collections
print('Hello')
try:
raise AttributeError('blah')
except AttributeError as e:
pass
"""
self.convert_check(before, after, stages=[1], run=PY2)
def test_octal_literals(self):
before = """
mode = 0644
"""
after = """
mode = 0o644
"""
self.convert_check(before, after)
def test_long_int_literals(self):
before = """
bignumber = 12345678901234567890L
"""
after = """
bignumber = 12345678901234567890
"""
self.convert_check(before, after)
def test___future___import_position(self):
"""
Issue #4: __future__ imports inserted too low in file: SyntaxError
"""
code = """
# Comments here
# and here
__version__=''' $Id$ '''
__doc__="A Sequencer class counts things. It aids numbering and formatting lists."
__all__='Sequencer getSequencer setSequencer'.split()
#
# another comment
#
CONSTANTS = [ 0, 01, 011, 0111, 012, 02, 021, 0211, 02111, 013 ]
_RN_LETTERS = "IVXLCDM"
def my_func(value):
pass
''' Docstring-like comment here '''
"""
self.convert(code)
def test_issue_45(self):
"""
Tests whether running futurize -f libfuturize.fixes.fix_future_standard_library_urllib
on the code below causes a ValueError (issue #45).
"""
code = r"""
from __future__ import print_function
from urllib import urlopen, urlencode
oeis_url = 'http://oeis.org/'
def _fetch(url):
try:
f = urlopen(url)
result = f.read()
f.close()
return result
except IOError as msg:
raise IOError("%s\nError fetching %s." % (msg, url))
"""
self.convert(code)
def test_order_future_lines(self):
"""
Tests the internal order_future_lines() function.
"""
before = '''
# comment here
from __future__ import print_function
from __future__ import absolute_import
# blank line or comment here
from future.utils import with_metaclass
from builtins import zzz
from builtins import aaa
from builtins import blah
# another comment
import something_else
code_here
more_code_here
'''
after = '''
# comment here
from __future__ import absolute_import
from __future__ import print_function
# blank line or comment here
from future.utils import with_metaclass
from builtins import aaa
from builtins import blah
from builtins import zzz
# another comment
import something_else
code_here
more_code_here
'''
self.assertEqual(order_future_lines(reformat_code(before)),
reformat_code(after))
@unittest.expectedFailure
def test_issue_12(self):
"""
Issue #12: This code shouldn't be upset by additional imports.
__future__ imports must appear at the top of modules since about Python
2.5.
"""
code = """
from __future__ import with_statement
f = open('setup.py')
for i in xrange(100):
pass
"""
self.unchanged(code)
@expectedFailurePY26
def test_range_necessary_list_calls(self):
"""
On Py2.6 (only), the xrange_with_import fixer somehow seems to cause
l = range(10)
to be converted to:
l = list(list(range(10)))
with an extra list(...) call.
"""
before = """
l = range(10)
assert isinstance(l, list)
for i in range(3):
print i
for i in xrange(3):
print i
"""
after = """
from __future__ import print_function
from builtins import range
l = list(range(10))
assert isinstance(l, list)
for i in range(3):
print(i)
for i in range(3):
print(i)
"""
self.convert_check(before, after)
def test_basestring(self):
"""
The 2to3 basestring fixer breaks working Py2 code that uses basestring.
This tests whether something sensible is done instead.
"""
before = """
assert isinstance('hello', basestring)
assert isinstance(u'hello', basestring)
assert isinstance(b'hello', basestring)
"""
after = """
from past.builtins import basestring
assert isinstance('hello', basestring)
assert isinstance(u'hello', basestring)
assert isinstance(b'hello', basestring)
"""
self.convert_check(before, after)
def test_safe_division(self):
"""
Tests whether Py2 scripts using old-style division still work
after futurization.
"""
before = """
import random
class fraction(object):
numer = 0
denom = 0
def __init__(self, numer, denom):
self.numer = numer
self.denom = denom
def total_count(self):
return self.numer * 50
x = 3 / 2
y = 3. / 2
foo = list(range(100))
assert x == 1 and isinstance(x, int)
assert y == 1.5 and isinstance(y, float)
a = 1 + foo[len(foo) / 2]
b = 1 + foo[len(foo) * 3 / 4]
assert a == 51
assert b == 76
r = random.randint(0, 1000) * 1.0 / 1000
output = { "SUCCESS": 5, "TOTAL": 10 }
output["SUCCESS"] * 100 / output["TOTAL"]
obj = fraction(1, 50)
val = float(obj.numer) / obj.denom * 1e-9
obj.numer * obj.denom / val
obj.total_count() * val / 100
obj.numer / obj.denom * 1e-9
obj.numer / (obj.denom * 1e-9)
obj.numer / obj.denom / 1e-9
obj.numer / (obj.denom / 1e-9)
original_numer = 1
original_denom = 50
100 * abs(obj.numer - original_numer) / float(max(obj.denom, original_denom))
100 * abs(obj.numer - original_numer) / max(obj.denom, original_denom)
float(original_numer) * float(original_denom) / float(obj.numer)
"""
after = """
from __future__ import division
from past.utils import old_div
import random
class fraction(object):
numer = 0
denom = 0
def __init__(self, numer, denom):
self.numer = numer
self.denom = denom
def total_count(self):
return self.numer * 50
x = old_div(3, 2)
y = 3. / 2
foo = list(range(100))
assert x == 1 and isinstance(x, int)
assert y == 1.5 and isinstance(y, float)
a = 1 + foo[old_div(len(foo), 2)]
b = 1 + foo[old_div(len(foo) * 3, 4)]
assert a == 51
assert b == 76
r = random.randint(0, 1000) * 1.0 / 1000
output = { "SUCCESS": 5, "TOTAL": 10 }
old_div(output["SUCCESS"] * 100, output["TOTAL"])
obj = fraction(1, 50)
val = float(obj.numer) / obj.denom * 1e-9
old_div(obj.numer * obj.denom, val)
old_div(obj.total_count() * val, 100)
old_div(obj.numer, obj.denom) * 1e-9
old_div(obj.numer, (obj.denom * 1e-9))
old_div(old_div(obj.numer, obj.denom), 1e-9)
old_div(obj.numer, (old_div(obj.denom, 1e-9)))
original_numer = 1
original_denom = 50
100 * abs(obj.numer - original_numer) / float(max(obj.denom, original_denom))
old_div(100 * abs(obj.numer - original_numer), max(obj.denom, original_denom))
float(original_numer) * float(original_denom) / float(obj.numer)
"""
self.convert_check(before, after)
def test_safe_division_overloaded(self):
"""
If division is overloaded, futurize may produce spurious old_div
calls. This test is for whether the code still works on Py2
despite these calls.
"""
before = """
class Path(str):
def __div__(self, other):
return self.__truediv__(other)
def __truediv__(self, other):
return Path(str(self) + '/' + str(other))
path1 = Path('home')
path2 = Path('user')
z = path1 / path2
assert isinstance(z, Path)
assert str(z) == 'home/user'
"""
after = """
from __future__ import division
from past.utils import old_div
class Path(str):
def __div__(self, other):
return self.__truediv__(other)
def __truediv__(self, other):
return Path(str(self) + '/' + str(other))
path1 = Path('home')
path2 = Path('user')
z = old_div(path1, path2)
assert isinstance(z, Path)
assert str(z) == 'home/user'
"""
self.convert_check(before, after)
def test_basestring_issue_156(self):
before = """
x = str(3)
allowed_types = basestring, int
assert isinstance('', allowed_types)
assert isinstance(u'', allowed_types)
assert isinstance(u'foo', basestring)
"""
after = """
from builtins import str
from past.builtins import basestring
x = str(3)
allowed_types = basestring, int
assert isinstance('', allowed_types)
assert isinstance(u'', allowed_types)
assert isinstance(u'foo', basestring)
"""
self.convert_check(before, after)
class TestConservativeFuturize(CodeHandler):
@unittest.expectedFailure
def test_basestring(self):
"""
In conservative mode, futurize would not modify "basestring"
but merely import it from ``past``, and the following code would still
run on both Py2 and Py3.
"""
before = """
assert isinstance('hello', basestring)
assert isinstance(u'hello', basestring)
assert isinstance(b'hello', basestring)
"""
after = """
from past.builtins import basestring
assert isinstance('hello', basestring)
assert isinstance(u'hello', basestring)
assert isinstance(b'hello', basestring)
"""
self.convert_check(before, after, conservative=True)
@unittest.expectedFailure
def test_open(self):
"""
In conservative mode, futurize would not import io.open because
this changes the default return type from bytes to text.
"""
before = """
filename = 'temp_file_open.test'
contents = 'Temporary file contents. Delete me.'
with open(filename, 'w') as f:
f.write(contents)
with open(filename, 'r') as f:
data = f.read()
assert isinstance(data, str)
assert data == contents
"""
after = """
from past.builtins import open, str as oldbytes, unicode
filename = oldbytes(b'temp_file_open.test')
contents = oldbytes(b'Temporary file contents. Delete me.')
with open(filename, oldbytes(b'w')) as f:
f.write(contents)
with open(filename, oldbytes(b'r')) as f:
data = f.read()
assert isinstance(data, oldbytes)
assert data == contents
assert isinstance(oldbytes(b'hello'), basestring)
assert isinstance(unicode(u'hello'), basestring)
assert isinstance(oldbytes(b'hello'), basestring)
"""
self.convert_check(before, after, conservative=True)
class TestFuturizeAllImports(CodeHandler):
"""
Tests "futurize --all-imports".
"""
@expectedFailurePY26
def test_all_imports(self):
before = """
import math
import os
l = range(10)
assert isinstance(l, list)
print 'Hello'
for i in xrange(100):
pass
print('Hello')
"""
after = """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import range
import math
import os
l = list(range(10))
assert isinstance(l, list)
print('Hello')
for i in range(100):
pass
print('Hello')
"""
self.convert_check(before, after, all_imports=True, ignore_imports=False)
if __name__ == '__main__':
unittest.main()
| mit |
trankmichael/scipy | scipy/interpolate/ndgriddata.py | 45 | 7161 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y, rescale=False):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
self.tree = cKDTree(self.points)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : ndarray of float, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
            tessellate the input point set to n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| bsd-3-clause |
andrewnc/scikit-learn | sklearn/linear_model/tests/test_sag.py | 93 | 25649 | # Authors: Danny Sullivan <[email protected]>
# Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import math
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import compute_class_weight
from sklearn.preprocessing import LabelEncoder
from sklearn.datasets import make_blobs
from sklearn.base import clone
# this is used for sag classification
def log_dloss(p, y):
z = p * y
    # for large |z| the cheaper asymptotic forms below are essentially exact
    # and avoid overflow in exp(z)
if z > 18.0:
return math.exp(-z) * -y
if z < -18.0:
return -y
return -y / (math.exp(z) + 1.0)
def log_loss(p, y):
return np.mean(np.log(1. + np.exp(-y * p)))
# this is used for sag regression
def squared_dloss(p, y):
return p - y
def squared_loss(p, y):
return np.mean(0.5 * (p - y) * (p - y))
# function for measuring the log loss
def get_pobj(w, alpha, myX, myy, loss):
w = w.ravel()
pred = np.dot(myX, w)
p = loss(pred, myy)
p += alpha * w.dot(w) / 2.
return p
def sag(X, y, step_size, alpha, n_iter=1, dloss=None, sparse=False,
sample_weight=None, fit_intercept=True):
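    # Pure-Python reference implementation of SAG used to cross-check the
    # Cython solver: one gradient is memorised per seen sample and each step
    # follows the running average of the memorised gradients.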
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(X.shape[1])
sum_gradient = np.zeros(X.shape[1])
gradient_memory = np.zeros((n_samples, n_features))
intercept = 0.0
intercept_sum_gradient = 0.0
intercept_gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
decay = 1.0
seen = set()
# sparse data has a fixed decay of .01
if sparse:
decay = .01
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
# idx = k
entry = X[idx]
seen.add(idx)
p = np.dot(entry, weights) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient + alpha * weights
sum_gradient += update - gradient_memory[idx]
gradient_memory[idx] = update
if fit_intercept:
intercept_sum_gradient += (gradient -
intercept_gradient_memory[idx])
intercept_gradient_memory[idx] = gradient
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
weights -= step_size * sum_gradient / len(seen)
return weights, intercept
def sag_sparse(X, y, step_size, alpha, n_iter=1,
dloss=None, sample_weight=None, sparse=False,
fit_intercept=True):
if step_size * alpha == 1.:
raise ZeroDivisionError("Sparse sag does not handle the case "
"step_size * alpha == 1")
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=np.int)
gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
# sparse data has a fixed decay of .01
if sparse:
decay = .01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
# idx = k
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter
p = (wscale * np.dot(entry, weights)) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
sum_gradient += update - (gradient_memory[idx] * entry)
if fit_intercept:
intercept_sum_gradient += gradient - gradient_memory[idx]
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
gradient_memory[idx] = gradient
wscale *= (1.0 - alpha * step_size)
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = (c_sum[counter - 1] +
step_size / (wscale * len(seen)))
if counter >= 1 and wscale < 1e-9:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
weights *= wscale
return weights, intercept
def get_step_size(X, alpha, fit_intercept, classification=True):
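    # SAG step-size heuristics: 4 / (max_i ||x_i||^2 + fit_intercept +
    # 4 * alpha) for log loss, 1 / (max_i ||x_i||^2 + fit_intercept + alpha)
    # for squared loss.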
if classification:
return (4.0 / (np.max(np.sum(X * X, axis=1))
+ fit_intercept + 4.0 * alpha))
else:
return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
@ignore_warnings
def test_classifier_matching():
n_samples = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
y[y == 0] = -1
alpha = 1.1
n_iter = 80
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept)
clf = LogisticRegression(solver="sag", fit_intercept=fit_intercept,
tol=1e-11, C=1. / alpha / n_samples,
max_iter=n_iter, random_state=10)
clf.fit(X, y)
weights, intercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights = np.atleast_2d(weights)
intercept = np.atleast_1d(intercept)
weights2 = np.atleast_2d(weights2)
intercept2 = np.atleast_1d(intercept2)
assert_array_almost_equal(weights, clf.coef_, decimal=10)
assert_array_almost_equal(intercept, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_regressor_matching():
n_samples = 10
n_features = 5
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
alpha = 1.
n_iter = 100
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha * n_samples, max_iter=n_iter)
clf.fit(X, y)
weights1, intercept1 = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
assert_array_almost_equal(weights1, clf.coef_, decimal=10)
assert_array_almost_equal(intercept1, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_sag_pobj_matches_logistic_regression():
"""tests if the sag pobj matches log reg"""
n_samples = 100
alpha = 1.0
max_iter = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
clf1 = LogisticRegression(solver='sag', fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf2 = clone(clf1)
clf3 = LogisticRegression(fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj2, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj1, decimal=4)
@ignore_warnings
def test_sag_pobj_matches_ridge_regression():
"""tests if the sag pobj matches ridge reg"""
n_samples = 100
n_features = 10
alpha = 1.0
n_iter = 100
fit_intercept = False
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
clf1 = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha, max_iter=n_iter, random_state=42)
clf2 = clone(clf1)
clf3 = Ridge(fit_intercept=fit_intercept, tol=.00001, solver='lsqr',
alpha=alpha, max_iter=n_iter, random_state=42)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj1, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj2, decimal=4)
@ignore_warnings
def test_sag_regressor_computed_correctly():
"""tests if the sag regressor is computed correctly"""
alpha = .1
n_features = 10
n_samples = 40
max_iter = 50
tol = .000001
fit_intercept = True
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w) + 2.
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf1 = Ridge(fit_intercept=fit_intercept, tol=tol, solver='sag',
alpha=alpha * n_samples, max_iter=max_iter)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights1, spintercept1 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights1.ravel(),
decimal=3)
assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
# TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
#assert_array_almost_equal(clf2.coef_.ravel(),
# spweights2.ravel(),
# decimal=3)
    #assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_get_auto_step_size():
X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
alpha = 1.2
fit_intercept = False
# sum the squares of the second sample because that's the largest
max_squared_sum = 4 + 9 + 16
max_squared_sum_ = get_max_squared_sum(X)
assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)
for fit_intercept in (True, False):
step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept))
step_size_log = 4.0 / (max_squared_sum + 4.0 * alpha +
int(fit_intercept))
step_size_sqr_ = get_auto_step_size(max_squared_sum_, alpha, "squared",
fit_intercept)
step_size_log_ = get_auto_step_size(max_squared_sum_, alpha, "log",
fit_intercept)
assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
assert_almost_equal(step_size_log, step_size_log_, decimal=4)
msg = 'Unknown loss function for SAG solver, got wrong instead of'
assert_raise_message(ValueError, msg, get_auto_step_size,
max_squared_sum_, alpha, "wrong", fit_intercept)
def test_get_max_squared_sum():
n_samples = 100
n_features = 10
rng = np.random.RandomState(42)
X = rng.randn(n_samples, n_features).astype(np.float64)
mask = rng.randn(n_samples, n_features)
X[mask > 0] = 0.
X_csr = sp.csr_matrix(X)
X[0, 3] = 0.
X_csr[0, 3] = 0.
sum_X = get_max_squared_sum(X)
sum_X_csr = get_max_squared_sum(X_csr)
assert_almost_equal(sum_X, sum_X_csr)
@ignore_warnings
def test_sag_regressor():
"""tests if the sag regressor performs well"""
xmin, xmax = -5, 5
n_samples = 20
tol = .001
max_iter = 20
alpha = 0.1
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.99)
assert_greater(score2, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
    score2 = clf2.score(X, y)
assert_greater(score1, 0.5)
assert_greater(score2, 0.5)
@ignore_warnings
def test_sag_classifier_computed_correctly():
"""tests if the binary classifier is computed correctly"""
alpha = .1
n_samples = 50
n_iter = 50
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_sag_multiclass_computed_correctly():
"""tests if the multiclass classifier is computed correctly"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 40
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
sparse=True,
fit_intercept=fit_intercept)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
@ignore_warnings
def test_classifier_results():
"""tests if classifier results match target"""
alpha = .1
n_features = 20
n_samples = 10
tol = .01
max_iter = 200
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
y = np.sign(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
pred1 = clf1.predict(X)
pred2 = clf2.predict(X)
assert_almost_equal(pred1, y, decimal=12)
assert_almost_equal(pred2, y, decimal=12)
@ignore_warnings
def test_binary_classifier_class_weight():
"""tests binary classifier with classweights for each class"""
alpha = .1
n_samples = 50
n_iter = 20
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: .45, -1: .55}
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_multiclass_classifier_class_weight():
"""tests multiclass with classweights for each class"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 50
class_weight = {0: .45, 1: .55, 2: .75}
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight,
sparse=True)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_single_class():
"""tests if ValueError is thrown with only one class"""
X = [[1, 2], [3, 4]]
y = [1, 1]
assert_raise_message(ValueError,
"This solver needs samples of at least 2 classes "
"in the data",
LogisticRegression(solver='sag').fit,
X, y)
def test_step_size_alpha_error():
X = [[0, 0], [0, 0]]
y = [1, -1]
fit_intercept = False
alpha = 1.
msg = ("Current sag implementation does not handle the case"
" step_size * alpha_scaled == 1")
clf1 = LogisticRegression(solver='sag', C=1. / alpha,
fit_intercept=fit_intercept)
assert_raise_message(ZeroDivisionError, msg, clf1.fit, X, y)
clf2 = Ridge(fit_intercept=fit_intercept, solver='sag', alpha=alpha)
assert_raise_message(ZeroDivisionError, msg, clf2.fit, X, y)
| bsd-3-clause |
sfepy/sfepy | script/plot_times.py | 5 | 1722 | #!/usr/bin/env python
"""
Plot time steps, times of time steps and time deltas in a HDF5 results file.
"""
from __future__ import absolute_import
import sys
sys.path.append('.')
from argparse import ArgumentParser
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.postprocess.time_history import extract_times
helps = {
'logarithmic' :
'plot time steps in logarithmic scale',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-l', '--logarithmic',
action='store_true', dest='logarithmic',
default=False, help=helps['logarithmic'])
parser.add_argument('filename')
options = parser.parse_args()
filename = options.filename
plt.rcParams['lines.linewidth'] = 3
plt.rcParams['lines.markersize'] = 9
fontsize = 16
steps, times, nts, dts = extract_times(filename)
dts[-1] = nm.nan
ax = plt.subplot(211)
if options.logarithmic:
l1, = ax.semilogy(steps, dts, 'b')
else:
l1, = ax.plot(steps, dts, 'b')
ax.set_xlabel('step', fontsize=fontsize)
ax.set_ylabel(r'$\Delta t$', fontsize=fontsize)
ax.grid(True)
ax = ax.twinx()
l2, = ax.plot(steps, times, 'g')
ax.set_ylabel(r'$t$', fontsize=fontsize)
ax.legend([l1, l2], [r'$\Delta t$', r'$t$'], loc=0)
ax = plt.subplot(212)
if options.logarithmic:
ax.semilogy(times, dts, 'b+')
else:
ax.plot(times, dts, 'b+')
ax.set_xlabel(r'$t$', fontsize=fontsize)
ax.set_ylabel(r'$\Delta t$', fontsize=fontsize)
ax.grid(True)
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
ofgulban/tetrahydra | examples/conversions/rgb_to_ilr.py | 1 | 4535 | """Create isometric logratio transformed coordinates for MRI data."""
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import os
import numpy as np
import compoda.core as tet
from compoda.utils import truncate_range, scale_range
from nibabel import load, save, Nifti1Image
# Load data
nii1 = load('/path/to/file1.nii.gz')
nii2 = load('/path/to/file2.nii.gz')
nii3 = load('/path/to/file3.nii.gz')
mask = load('/path/to/mask.nii.gz').get_data()
mask[mask > 0] = 1. # binarize
basename = nii1.get_filename().split(os.extsep, 1)[0]
dirname = os.path.dirname(nii1.get_filename())
vol1 = nii1.get_data()
vol2 = nii2.get_data()
vol3 = nii3.get_data()
dims = vol1.shape + (3,)
comp = np.zeros(dims)
comp[..., 0] = vol1 * mask
comp[..., 1] = vol2 * mask
comp[..., 2] = vol3 * mask
comp = comp.reshape(dims[0]*dims[1]*dims[2], dims[3])
# (optional) truncate and rescale
for i in range(comp.shape[1]):
temp = comp[:, i]
temp = truncate_range(temp)
temp = scale_range(temp, scale_factor=1000)
comp[:, i] = temp
# Impute
comp[comp == 0] = 1.
# Closure
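# (closure rescales each row so its parts sum to a constant; e.g. the
# composition [2, 3, 5] becomes [0.2, 0.3, 0.5])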
comp = tet.closure(comp)
# Plot related operations
p_mask = mask.reshape(dims[0]*dims[1]*dims[2])
p_comp = comp[p_mask > 0]
# Isometric logratio transformation before any centering
ilr_orig = tet.ilr_transformation(np.copy(p_comp))
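# Perturbation by the inverse of the sample center is the compositional
# analogue of mean-centering, and powering by 1/sqrt(total variance) is the
# analogue of scaling to unit variance; the ilr transform then maps each
# 3-part composition to 2 real coordinates for plotting.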
# Centering
center = tet.sample_center(p_comp)
print("Sample center: " + str(center))
c_temp = np.ones(p_comp.shape) * center
p_comp = tet.perturb(p_comp, c_temp**-1)
# Standardize
totvar = tet.sample_total_variance(p_comp, center)
p_comp = tet.power(p_comp, np.power(totvar, -1./2.))
# Isometric logratio transformation for plotting
ilr = tet.ilr_transformation(p_comp)
# Plots
fig = plt.figure()
limits = [-2.5, 2.5]
ax_1 = plt.subplot(121)
# Plot 2D histogram of ilr transformed data
_, _, _, h_1 = ax_1.hist2d(ilr_orig[:, 0], ilr_orig[:, 1], bins=2000,
cmap='gray_r')
h_1.set_norm(LogNorm(vmax=np.power(10, 3)))
plt.colorbar(h_1, fraction=0.046, pad=0.04)
ax_1.set_title('Before Centering')
ax_1.set_xlabel('$v_1$')
ax_1.set_ylabel('$v_2$')
ax_1.set_aspect('equal')
ax_1.set_xlim(limits)
ax_1.set_ylim(limits)
ax_2 = plt.subplot(122)
# Plot 2D histogram of ilr transformed data
_, _, _, h_2 = ax_2.hist2d(ilr[:, 0], ilr[:, 1], bins=2000, cmap='gray_r')
h_2.set_norm(LogNorm(vmax=np.power(10, 3)))
plt.colorbar(h_2, fraction=0.046, pad=0.04)
ax_2.set_title('After Centering')
ax_2.set_xlabel('$v_1$')
ax_2.set_ylabel('$v_2$')
ax_2.set_aspect('equal')
ax_2.set_xlim(limits)
ax_2.set_ylim(limits)
# plot axes of primary colors on top
nr_nodes, max_node = 2, 15
caxw = 1 # width
for a in range(3): # loop through the primary axes
# create a set of compositions along a primary axis
nodes = np.linspace(1, max_node, nr_nodes)
c_axis = np.ones([nr_nodes, 3])
c_axis[:, a] = nodes
c_axis = tet.closure(c_axis)
c_axis = tet.ilr_transformation(c_axis)
ax_1.add_patch(patches.Polygon(c_axis, closed=False, linewidth=caxw,
facecolor='k', edgecolor='k'))
for a in range(3):
# create a set of compositions along a primary axis
nodes = np.linspace(1, max_node, nr_nodes)
c_axis = np.ones([nr_nodes, 3])
c_axis[:, a] = nodes
c_axis = tet.closure(c_axis)
# (optional) center the primary guides the same way
c_temp = np.ones(c_axis.shape) * center
c_axis = tet.perturb(c_axis, c_temp**-1.)
c_axis = tet.power(c_axis, np.power(totvar, -1./2.))
c_axis = tet.ilr_transformation(c_axis)
ax_2.add_patch(patches.Polygon(c_axis, closed=False, linewidth=caxw,
facecolor='k', edgecolor='k'))
plt.show()
print('Exporting ilr coordinates...')
# ilr transformation for nifti output (also considering unplotted data)
# Centering
c_temp = np.ones(comp.shape) * center
comp = tet.perturb(comp, c_temp**-1.)
# Standardize
comp = tet.power(comp, np.power(totvar, -1./2.))
ilr = tet.ilr_transformation(comp)
# save the new coordinates
ilr = ilr.reshape(dims[0], dims[1], dims[2], dims[3]-1)
for i in range(ilr.shape[-1]):
img = ilr[..., i] * mask
# scale is done for FSL-FAST otherwise it cannot find clusters
img = truncate_range(img, percMin=0, percMax=100)
img = scale_range(img, scale_factor=2000)
# img[mask == 0] = 0 # swap masked and imputed regions with zeros
out = Nifti1Image(img, affine=nii1.affine)
save(out, os.path.join(dirname, 'ilr_coord_'+str(i+1)+'.nii.gz'))
print('Finished.')
| gpl-3.0 |
robin-lai/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
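    # One possible way to fill in the TASK blocks above. This is an
    # illustrative sketch, not the reference solution; the vectorizer and
    # classifier parameters are assumptions.
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    for params, mean_score, scores in grid_search.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r"
              % (mean_score, scores.std() * 2, params))
    y_predicted = grid_search.predict(docs_test)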
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
thisch/pydipole | examples/singledipole.py | 1 | 2415 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import logging
LG = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)
from dipole.field import dipole_e_ff
from dipole.field import dipole_radiant_intensity
from dipole.helper import gen_r
def plot_intens(T=None, P=None, intens=None, title=None, XY=None, ax=None):
if ax is None:
fig, ax = plt.subplots()
if XY is not None:
ax.pcolormesh(XY[0], XY[1], intens)
else:
ax.pcolormesh(np.degrees(T*np.cos(P)), np.degrees(T*np.sin(P)),
intens)
ax.set_xlabel(r'$\Theta_x$', fontsize=16)
ax.set_ylabel(r'$\Theta_y$', fontsize=16)
tm = 90
ax.set_xlim(-tm, tm)
ax.set_ylim(-tm, tm)
ax.set_xticks([-tm, -45, 0, 45, tm])
ax.set_yticks([-tm, -45, 0, 45, tm])
ax.set_aspect('equal')
if title:
ax.set_title(title)
return ax
def main(onsphere=False):
if not onsphere:
thetamax = 40.
else:
thetamax = 90.
k = 1.
Lam = 2*np.pi/k
reval = 1000*Lam
ngrid = 128
for align_axis in 'xyz':
LG.info('#### SETTINGS: k=%g, reval=%g', k, reval)
rparams = gen_r(ngrid, reval=reval, onsphere=onsphere, thetamax=thetamax)
pdisk = np.zeros((1, 3))
if align_axis == 'z':
pdisk[:, 2] = 1.
elif align_axis == 'x':
pdisk[:, 0] = 1.
elif align_axis == 'y':
pdisk[:, 1] = 1.
rdip = np.zeros((1, 3))
phases = np.zeros(1)
if onsphere:
# radiant intensity
intens = dipole_radiant_intensity(rparams[0],
rparams[1],
pdisk, rdip, phases, k)
else:
# is it better to calculate the intensity in z-direction and not
# the radiant-intensity?
eff = dipole_e_ff(rparams[-1], pdisk, rdip, phases, k, t=0)
intens = np.linalg.norm(eff, axis=2)**2
if onsphere:
T, P, _ = rparams
ax = plot_intens(T, P, intens)
else:
X, Y, _, = rparams
ax = plot_intens(intens=intens, XY=(X, Y))
ax.set_title('k=%g, dipole orientation: %s-axis' % (k, align_axis))
plt.show()
if __name__ == '__main__':
main(onsphere=True)
main(onsphere=False)
| mit |
rhattersley/cartopy | lib/cartopy/tests/mpl/test_nightshade.py | 3 | 1309 | # (C) British Crown Copyright 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from datetime import datetime
import matplotlib.pyplot as plt
import pytest
import cartopy.crs as ccrs
from cartopy.feature.nightshade import Nightshade
from cartopy.tests.mpl import ImageTesting
@pytest.mark.natural_earth
@ImageTesting(['nightshade_platecarree'])
def test_nightshade_image():
# Test the actual creation of the image
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
dt = datetime(2018, 11, 10, 0, 0)
ax.set_global()
ax.add_feature(Nightshade(dt, alpha=0.75))
| lgpl-3.0 |
hdmetor/scikit-learn | examples/model_selection/plot_precision_recall.py | 249 | 6150 | """
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
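# A quick numeric check of the formulas above, using made-up counts for
# illustration: with T_p = 8, F_p = 2 and F_n = 4 we get P = 8/10 = 0.8,
# R = 8/12 = 2/3 and F1 = 2 * P * R / (P + R) = 8/11.
_p, _r = 8. / (8 + 2), 8. / (8 + 4)
assert abs(2 * _p * _r / (_p + _r) - 8. / 11) < 1e-12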
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average precision-recall curve and average precision
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i in range(n_classes):
plt.plot(recall[i], precision[i],
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
RayMick/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
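# Lines in the svmlight / libsvm format look like
#   <label> <feature_id>:<value> <feature_id>:<value> ...
# optionally with a "qid:<integer>" entry right after the label (exercised in
# test_load_with_qid below).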
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
BhallaLab/moose-examples | snippets/multiscaleOneCompt.py | 2 | 7296 | #########################################################################
## multiscaleOneCompt.py ---
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU General Public License version 2 or later.
## See the file COPYING.LIB for the full notice.
#########################################################################
from __future__ import print_function
import sys
import os
os.environ['NUMPTHREADS'] = '1'
import math
import numpy
import pylab
import matplotlib.pyplot as plt
import moose
import proto18
scriptDir = os.path.dirname( os.path.realpath( __file__ ) )
#EREST_ACT = -70e-3
def loadElec():
library = moose.Neutral( '/library' )
moose.setCwe( '/library' )
proto18.make_Ca()
proto18.make_Ca_conc()
proto18.make_Na()
proto18.make_K_DR()
proto18.make_K_A()
# Disable all the prototypes.
for x in moose.wildcardFind( "/library/##" ):
x.tick = -1
model = moose.Neutral( '/model' )
cellId = moose.loadModel(
os.path.join( scriptDir, 'soma.p')
, '/model/elec', "Neutral"
)
moose.setCwe( '/' )
return cellId
def loadChem():
chem = moose.Neutral( '/model/chem' )
modelId = moose.loadModel(
os.path.join( scriptDir, '..', 'genesis', 'chanPhosphByCaMKII.g' )
, '/model/chem', 'ee'
)
stoich = moose.Stoich( '/model/chem/kinetics/stoich' )
ksolve = moose.Ksolve( '/model/chem/kinetics/ksolve' )
stoich.compartment = moose.element( '/model/chem/kinetics' )
stoich.ksolve = ksolve
stoich.path = "/model/chem/##"
def makeModel():
loadElec()
loadChem()
makeAdaptors()
def makeAdaptors():
##################################################################
# set up adaptor for elec model Ca -> chem model Ca
# Here it is easy because we don't have to deal with different
# sizes of electrical and chemical compartments.
adaptCa = moose.Adaptor( '/model/chem/kinetics/adaptCa' )
chemCa = moose.element( '/model/chem/kinetics/Ca' )
elecCa = moose.element( '/model/elec/soma/Ca_conc' )
moose.connect( elecCa, 'concOut', adaptCa, 'input' )
moose.connect( adaptCa, 'output', chemCa, 'setConc' )
adaptCa.inputOffset = 0.0 #
adaptCa.outputOffset = 0.00008 # 80 nM offset in chem.
adaptCa.scale = 0.0008
# set up adaptor for chem model chan -> elec model chan.
adaptChan = moose.Adaptor( '/model/chem/kinetics/adaptChan' )
chemChan = moose.element( '/model/chem/kinetics/chan' )
elecChan = moose.element( '/model/elec/soma/K_A' )
# The Adaptor has to request the output conc of the chemical pool,
# since there isn't an output message to deliver this value.
moose.connect( adaptChan, 'requestOut', chemChan, 'getConc' )
moose.connect( adaptChan, 'output', elecChan, 'setGbar' )
adaptChan.inputOffset = 0.0 #
adaptChan.outputOffset = 0.0
adaptChan.scale = 1e-5 #
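# A minimal sketch of the affine mapping the Adaptors above are assumed to
# apply (output = outputOffset + scale * (input - inputOffset)). The exact
# MOOSE Adaptor formula and the helper name are illustrative assumptions.
def adaptorMappingSketch( value, inputOffset = 0.0,
        outputOffset = 0.00008, scale = 0.0008 ):
    return outputOffset + scale * ( value - inputOffset )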
def addPlot( objpath, field, plot, tick ):
if moose.exists( objpath ):
tab = moose.Table( '/graphs/' + plot )
obj = moose.element( objpath )
moose.connect( tab, 'requestOut', obj, field )
tab.tick = tick
return tab
else:
print(("failed in addPlot(", objpath, field, plot, tick, ")"))
return 0
def main():
"""
This example builds a simple multiscale model involving
electrical and chemical signaling, but without spatial dimensions.
The electrical cell model is in a single compartment and has
voltage-gated channels, including a voltage-gated Ca channel for
Ca influx, and a K_A channel which is regulated by the chemical
pathways.
The chemical model has calcium activating Calmodulin which activates
CaM-Kinase II. The kinase phosphorylates the K_A channel to inactivate
it.
The net effect of the multiscale activity is a positive feedback
loop where activity increases Ca, which activates the kinase,
which reduces K_A, leading to increased excitability of the cell.
In this example this results
in a bistable neuron. In the resting state the cell does not fire,
but if it is activated by a current pulse the cell will continue to
fire even after the current is turned off. Application of an
inhibitory current restores the cell to its silent state.
Both the electrical and chemical models are loaded in from model
description files, and these files could be replaced if one wished
to define different models. However, there
are model-specific Adaptor objects needed to map activity between the
models of the two kinds. The Adaptors connect specific model entities
between the two models. Here one Adaptor connects the electrical
Ca_conc object to the chemical Ca pool. The other Adaptor connects
the chemical pool representing the K_A channel to its conductance
term in the electrical model.
"""
runtime = 4
elecDt = 50e-6
chemDt = 0.005
ePlotDt = 0.5e-3
cPlotDt = 0.0025
makeModel()
moose.setClock( 8, ePlotDt )
moose.setClock( 18, cPlotDt )
for i in range( 0, 10 ):
moose.setClock( i, elecDt )
for i in range( 10, 18 ):
moose.setClock( i, chemDt )
graphs = moose.Neutral( '/graphs' )
caplot = addPlot( '/model/elec/soma/Ca_conc', 'getCa', 'somaCa', 8 )
vmplot = addPlot( '/model/elec/soma', 'getVm', 'somaVm', 8 )
ikplot = addPlot( '/model/elec/soma/K_A', 'getIk', 'KAIk', 8 )
addPlot( '/model/chem/kinetics/chan', 'getConc', 'chan', 18 )
addPlot( '/model/chem/kinetics/Ca', 'getConc', 'Ca', 18 )
addPlot( '/model/chem/kinetics/CaM', 'getConc', 'CaM', 18 )
addPlot( '/model/chem/kinetics/Ca_CaM_CaMKII', 'getConc', 'enz', 18 )
hsolve = moose.HSolve( '/model/elec/hsolve' )
hsolve.dt = elecDt
hsolve.target = '/model/elec/soma'
moose.reinit()
moose.showfield( '/model/elec/soma' )
moose.element( '/model/elec/soma' ).inject = 0e-12
moose.start( runtime )
for i in moose.wildcardFind( '/model/elec/soma/#[ISA=ChanBase]' ):
print( "{} {} {}".format( i.name, i.Gbar, i.modulation ))
moose.element( '/model/elec/soma' ).inject = 1e-12
moose.start( runtime )
moose.element( '/model/elec/soma' ).inject = 0e-12
moose.start( runtime )
moose.element( '/model/elec/soma' ).inject = -1e-12
moose.start( runtime )
moose.element( '/model/elec/soma' ).inject = 0e-12
moose.start( runtime )
fig = plt.figure( figsize = (12,10) )
t = numpy.arange( 0, caplot.vector.size, 1 ) * caplot.dt
p1 = fig.add_subplot( 411 )
p1.plot( t, caplot.vector, label="Ca elec" )
p1.legend()
p2 = fig.add_subplot( 412 )
p2.plot( t, vmplot.vector, label="Vm" )
p2.legend()
p3 = fig.add_subplot( 413 )
p3.plot( t, ikplot.vector, label="Ik for K_A" )
p3.legend()
p4 = fig.add_subplot( 414 )
for x in moose.wildcardFind( '/graphs/#[FIELD(tick)=18]' ):
t = numpy.arange( 0, x.vector.size, 1 ) * x.dt
p4.plot( t, x.vector, label=x.name )
p4.legend()
plt.show()
quit()
if __name__ == '__main__':
main()
| gpl-2.0 |
harshnisar/EvoML | evoml/subsampling/mutators.py | 2 | 2506 | # -*- coding: utf-8 -*-
"""
Copyright 2016 Bhanu Pratap and Harsh Nisar.
This file is part of the Evoml library.
The Evoml library is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License v3 or later.
Check the license file received along with the software for further details.
"""
import random
import numpy as np
import pandas as pd
from .util import EstimatorGene
def segment_mutator_EG(individual, pool_data, indpb, private_test = False):
"""
    Takes data from pool_data and mutates the existing training data
    to generate new fit estimators.
    A mutation can:
    - add rows drawn randomly from pool_data
    - delete rows randomly from the individual
    - replace a few rows of the individual with rows from pool_data
    Each action has an equal probability of happening.
Parameters
----------
individual: List of EstimatorGene
pool_data : DataFrame
Pool data from which rows are added or swapped.
indpb : float, required
        Probability with which mutation happens on each EstimatorGene of the
individual.
"""
df_train = pool_data
for i, eg_ in enumerate(individual):
if random.random()>=indpb:
continue
        # Perturb a small fraction of this estimator's current training data.
df_ = eg_.get_data()
# Mutation can affect 5% of the dataset.
n_rows = int(0.05*pool_data.shape[0])
rnd = random.random()
if rnd<0.33:
#add rows from the main df
rows = np.random.choice(df_train.index.values, n_rows)
df_ = df_.append(df_train.ix[rows])
elif rnd<0.66:
# delete rows randomly from the individual
new_shape = df_.shape[0] - n_rows
df_ = df_.sample(n=new_shape, replace = False, axis = 0)
# df_.drop(labels=np.random.choice(df_.index, n_rows), axis=0, inplace=True)
else:
#replace a few rows
new_shape = df_.shape[0] - n_rows
df_ = df_.sample(n=new_shape, replace = False, axis = 0)
# df_.drop(labels=np.random.choice(df_.index, n_rows), axis=0, inplace=True)
rows = np.random.choice(df_train.index.values, n_rows)
df_ = df_.append(df_train.ix[rows])
## Retrain the model in EstimatorGene with new data.
eg_ = EstimatorGene(df_.iloc[:,:-1], df_.iloc[:,-1], eg_.base_estimator, private_test = private_test)
individual[i] = eg_
return (individual,)
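# A minimal, self-contained sketch of the three row-level operations used by
# segment_mutator_EG (add / delete / replace a small fraction of rows) on a
# plain DataFrame. The helper name and the fixed 5% fraction are illustrative
# assumptions, not part of the EvoML API.
def _row_mutation_sketch(df_, df_pool, frac=0.05):
    n_rows = max(1, int(frac * df_pool.shape[0]))
    rnd = random.random()
    if rnd < 0.33:
        # add: append randomly chosen pool rows
        rows = np.random.choice(df_pool.index.values, n_rows)
        df_ = df_.append(df_pool.loc[rows])
    elif rnd < 0.66:
        # delete: keep a random subsample that is n_rows smaller
        df_ = df_.sample(n=df_.shape[0] - n_rows, replace=False, axis=0)
    else:
        # replace: delete n_rows, then add n_rows drawn from the pool
        df_ = df_.sample(n=df_.shape[0] - n_rows, replace=False, axis=0)
        rows = np.random.choice(df_pool.index.values, n_rows)
        df_ = df_.append(df_pool.loc[rows])
    return df_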
| gpl-3.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/mpl_toolkits/axes_grid1/inset_locator.py | 6 | 18700 | """
A collection of functions and objects for creating or placing inset axes.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib import docstring
from matplotlib.externals import six
from matplotlib.offsetbox import AnchoredOffsetbox
from matplotlib.patches import Patch, Rectangle
from matplotlib.path import Path
from matplotlib.transforms import Bbox, BboxTransformTo
from matplotlib.transforms import IdentityTransform, TransformedBbox
from . import axes_size as Size
from .parasite_axes import HostAxes
class InsetPosition(object):
@docstring.dedent_interpd
def __init__(self, parent, lbwh):
"""
An object for positioning an inset axes.
This is created by specifying the normalized coordinates in the axes,
instead of the figure.
Parameters
----------
parent : `matplotlib.axes.Axes`
Axes to use for normalizing coordinates.
lbwh : iterable of four floats
The left edge, bottom edge, width, and height of the inset axes, in
units of the normalized coordinate of the *parent* axes.
See Also
--------
:meth:`matplotlib.axes.Axes.set_axes_locator`
Examples
--------
The following bounds the inset axes to a box with 20%% of the parent
axes's height and 40%% of the width. The size of the axes specified
([0, 0, 1, 1]) ensures that the axes completely fills the bounding box:
>>> parent_axes = plt.gca()
>>> ax_ins = plt.axes([0, 0, 1, 1])
        >>> ip = InsetPosition(parent_axes, [0.5, 0.1, 0.4, 0.2])
>>> ax_ins.set_axes_locator(ip)
"""
self.parent = parent
self.lbwh = lbwh
def __call__(self, ax, renderer):
bbox_parent = self.parent.get_position(original=False)
trans = BboxTransformTo(bbox_parent)
bbox_inset = Bbox.from_bounds(*self.lbwh)
bb = TransformedBbox(bbox_inset, trans)
return bb
class AnchoredLocatorBase(AnchoredOffsetbox):
def __init__(self, bbox_to_anchor, offsetbox, loc,
borderpad=0.5, bbox_transform=None):
super(AnchoredLocatorBase, self).__init__(
loc, pad=0., child=None, borderpad=borderpad,
bbox_to_anchor=bbox_to_anchor, bbox_transform=bbox_transform
)
def draw(self, renderer):
raise RuntimeError("No draw method should be called")
def __call__(self, ax, renderer):
self.axes = ax
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
self._update_offset_func(renderer, fontsize)
width, height, xdescent, ydescent = self.get_extent(renderer)
px, py = self.get_offset(width, height, 0, 0, renderer)
bbox_canvas = Bbox.from_bounds(px, py, width, height)
tr = ax.figure.transFigure.inverted()
bb = TransformedBbox(bbox_canvas, tr)
return bb
class AnchoredSizeLocator(AnchoredLocatorBase):
def __init__(self, bbox_to_anchor, x_size, y_size, loc,
borderpad=0.5, bbox_transform=None):
super(AnchoredSizeLocator, self).__init__(
bbox_to_anchor, None, loc,
borderpad=borderpad, bbox_transform=bbox_transform
)
self.x_size = Size.from_any(x_size)
self.y_size = Size.from_any(y_size)
def get_extent(self, renderer):
x, y, w, h = self.get_bbox_to_anchor().bounds
dpi = renderer.points_to_pixels(72.)
r, a = self.x_size.get_size(renderer)
width = w*r + a*dpi
r, a = self.y_size.get_size(renderer)
height = h*r + a*dpi
xd, yd = 0, 0
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return width+2*pad, height+2*pad, xd+pad, yd+pad
class AnchoredZoomLocator(AnchoredLocatorBase):
def __init__(self, parent_axes, zoom, loc,
borderpad=0.5,
bbox_to_anchor=None,
bbox_transform=None):
self.parent_axes = parent_axes
self.zoom = zoom
if bbox_to_anchor is None:
bbox_to_anchor = parent_axes.bbox
super(AnchoredZoomLocator, self).__init__(
bbox_to_anchor, None, loc, borderpad=borderpad,
bbox_transform=bbox_transform)
def get_extent(self, renderer):
bb = TransformedBbox(self.axes.viewLim,
self.parent_axes.transData)
x, y, w, h = bb.bounds
xd, yd = 0, 0
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return w*self.zoom+2*pad, h*self.zoom+2*pad, xd+pad, yd+pad
class BboxPatch(Patch):
@docstring.dedent_interpd
def __init__(self, bbox, **kwargs):
"""
Patch showing the shape bounded by a Bbox.
Parameters
----------
bbox : `matplotlib.transforms.Bbox`
Bbox to use for the extents of this patch.
**kwargs
Patch properties. Valid arguments include:
%(Patch)s
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
kwargs["transform"] = IdentityTransform()
Patch.__init__(self, **kwargs)
self.bbox = bbox
def get_path(self):
x0, y0, x1, y1 = self.bbox.extents
verts = [(x0, y0),
(x1, y0),
(x1, y1),
(x0, y1),
(x0, y0),
(0, 0)]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY]
return Path(verts, codes)
get_path.__doc__ = Patch.get_path.__doc__
class BboxConnector(Patch):
@staticmethod
def get_bbox_edge_pos(bbox, loc):
"""
Helper function to obtain the location of a corner of a bbox
Parameters
----------
bbox : `matplotlib.transforms.Bbox`
loc : {1, 2, 3, 4}
Corner of *bbox*. Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
Returns
-------
x, y : float
Coordinates of the corner specified by *loc*.
"""
x0, y0, x1, y1 = bbox.extents
if loc == 1:
return x1, y1
elif loc == 2:
return x0, y1
elif loc == 3:
return x0, y0
elif loc == 4:
return x1, y0
@staticmethod
def connect_bbox(bbox1, bbox2, loc1, loc2=None):
"""
Helper function to obtain a Path from one bbox to another.
Parameters
----------
bbox1, bbox2 : `matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1 : {1, 2, 3, 4}
Corner of *bbox1* to use. Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
loc2 : {1, 2, 3, 4}, optional
Corner of *bbox2* to use. If None, defaults to *loc1*.
Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
Returns
-------
path : `matplotlib.path.Path`
A line segment from the *loc1* corner of *bbox1* to the *loc2*
corner of *bbox2*.
"""
if isinstance(bbox1, Rectangle):
            transform = bbox1.get_transform()
bbox1 = Bbox.from_bounds(0, 0, 1, 1)
bbox1 = TransformedBbox(bbox1, transform)
if isinstance(bbox2, Rectangle):
transform = bbox2.get_transform()
bbox2 = Bbox.from_bounds(0, 0, 1, 1)
bbox2 = TransformedBbox(bbox2, transform)
if loc2 is None:
loc2 = loc1
x1, y1 = BboxConnector.get_bbox_edge_pos(bbox1, loc1)
x2, y2 = BboxConnector.get_bbox_edge_pos(bbox2, loc2)
verts = [[x1, y1], [x2, y2]]
codes = [Path.MOVETO, Path.LINETO]
return Path(verts, codes)
@docstring.dedent_interpd
def __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):
"""
Connect two bboxes with a straight line.
Parameters
----------
bbox1, bbox2 : `matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1 : {1, 2, 3, 4}
Corner of *bbox1* to draw the line. Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
loc2 : {1, 2, 3, 4}, optional
Corner of *bbox2* to draw the line. If None, defaults to *loc1*.
Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
**kwargs
Patch properties for the line drawn. Valid arguments include:
%(Patch)s
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
kwargs["transform"] = IdentityTransform()
Patch.__init__(self, **kwargs)
self.bbox1 = bbox1
self.bbox2 = bbox2
self.loc1 = loc1
self.loc2 = loc2
def get_path(self):
return self.connect_bbox(self.bbox1, self.bbox2,
self.loc1, self.loc2)
get_path.__doc__ = Patch.get_path.__doc__
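# A minimal sketch of BboxConnector.connect_bbox: build a Path joining the
# lower-left corner (loc1=3) of one bbox to the upper-right corner (loc2=1)
# of another. The bbox extents and the helper name are arbitrary illustrations.
def _connect_bbox_sketch():
    bbox_a = Bbox.from_bounds(0, 0, 1, 1)
    bbox_b = Bbox.from_bounds(2, 2, 1, 1)
    return BboxConnector.connect_bbox(bbox_a, bbox_b, loc1=3, loc2=1)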
class BboxConnectorPatch(BboxConnector):
@docstring.dedent_interpd
def __init__(self, bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, **kwargs):
"""
Connect two bboxes with a quadrilateral.
The quadrilateral is specified by two lines that start and end at corners
of the bboxes. The four sides of the quadrilateral are defined by the two
lines given, the line between the two corners specified in *bbox1* and the
line between the two corners specified in *bbox2*.
Parameters
----------
bbox1, bbox2 : `matplotlib.transforms.Bbox`
Bounding boxes to connect.
loc1a, loc2a : {1, 2, 3, 4}
Corners of *bbox1* and *bbox2* to draw the first line.
Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
loc1b, loc2b : {1, 2, 3, 4}
Corners of *bbox1* and *bbox2* to draw the second line.
Valid values are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4
**kwargs
Patch properties for the line drawn:
%(Patch)s
"""
if "transform" in kwargs:
raise ValueError("transform should not be set")
BboxConnector.__init__(self, bbox1, bbox2, loc1a, loc2a, **kwargs)
self.loc1b = loc1b
self.loc2b = loc2b
def get_path(self):
path1 = self.connect_bbox(self.bbox1, self.bbox2, self.loc1, self.loc2)
path2 = self.connect_bbox(self.bbox2, self.bbox1,
self.loc2b, self.loc1b)
path_merged = (list(path1.vertices) +
list(path2.vertices) +
[path1.vertices[0]])
return Path(path_merged)
get_path.__doc__ = BboxConnector.get_path.__doc__
def _add_inset_axes(parent_axes, inset_axes):
"""Helper function to add an inset axes and disable navigation in it"""
parent_axes.figure.add_axes(inset_axes)
inset_axes.set_navigate(False)
@docstring.dedent_interpd
def inset_axes(parent_axes, width, height, loc=1,
bbox_to_anchor=None, bbox_transform=None,
axes_class=None,
axes_kwargs=None,
borderpad=0.5):
"""
Create an inset axes with a given width and height.
Both sizes used can be specified either in inches or percentage of the
parent axes.
Parameters
----------
parent_axes : `matplotlib.axes.Axes`
Axes to place the inset axes.
width, height : float or str
Size of the inset axes to create.
loc : int or string, optional, default to 1
Location to place the inset axes. The valid locations are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional
        Bbox that the inset axes will be anchored to. Can be a tuple of
        [left, bottom, width, height], or a tuple of [left, bottom].
    bbox_transform : `matplotlib.transforms.Transform`, optional
        Transformation for the bbox. If None, `parent_axes.transAxes` is used.
    axes_class : `matplotlib.axes.Axes` type, optional
        If specified, the inset axes will be created with this class's
        constructor.
    axes_kwargs : dict, optional
        Keyword arguments to pass to the constructor of the inset axes.
Valid arguments include:
%(Axes)s
borderpad : float, optional
Padding between inset axes and the bbox_to_anchor. Defaults to 0.5.
Returns
-------
inset_axes : `axes_class`
Inset axes object created.
"""
if axes_class is None:
axes_class = HostAxes
if axes_kwargs is None:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position())
else:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
**axes_kwargs)
if bbox_to_anchor is None:
bbox_to_anchor = parent_axes.bbox
axes_locator = AnchoredSizeLocator(bbox_to_anchor,
width, height,
loc=loc,
bbox_transform=bbox_transform,
borderpad=borderpad)
inset_axes.set_axes_locator(axes_locator)
_add_inset_axes(parent_axes, inset_axes)
return inset_axes
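# A minimal usage sketch for inset_axes: place a small inset in the upper-left
# corner of a parent axes. The helper name, the percentage sizes, and the data
# plotted are arbitrary illustrations.
def _inset_axes_usage_sketch():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    # 30% of the parent's width and height, anchored in the upper left corner
    axins = inset_axes(ax, width="30%", height="30%", loc=2)
    axins.plot([0, 1, 2], [0, 1, 4])
    return fig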
@docstring.dedent_interpd
def zoomed_inset_axes(parent_axes, zoom, loc=1,
bbox_to_anchor=None, bbox_transform=None,
axes_class=None,
axes_kwargs=None,
borderpad=0.5):
"""
Create an anchored inset axes by scaling a parent axes.
Parameters
----------
parent_axes : `matplotlib.axes.Axes`
Axes to place the inset axes.
zoom : float
        Scaling factor of the data axes. *zoom* > 1 will enlarge the
coordinates (i.e., "zoomed in"), while *zoom* < 1 will shrink the
coordinates (i.e., "zoomed out").
loc : int or string, optional, default to 1
Location to place the inset axes. The valid locations are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional
        Bbox that the inset axes will be anchored to. Can be a tuple of
        [left, bottom, width, height], or a tuple of [left, bottom].
    bbox_transform : `matplotlib.transforms.Transform`, optional
        Transformation for the bbox. If None, `parent_axes.transAxes` is used.
    axes_class : `matplotlib.axes.Axes` type, optional
        If specified, the inset axes will be created with this class's
        constructor.
    axes_kwargs : dict, optional
        Keyword arguments to pass to the constructor of the inset axes.
Valid arguments include:
%(Axes)s
borderpad : float, optional
Padding between inset axes and the bbox_to_anchor. Defaults to 0.5.
Returns
-------
inset_axes : `axes_class`
Inset axes object created.
"""
if axes_class is None:
axes_class = HostAxes
if axes_kwargs is None:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position())
else:
inset_axes = axes_class(parent_axes.figure, parent_axes.get_position(),
**axes_kwargs)
axes_locator = AnchoredZoomLocator(parent_axes, zoom=zoom, loc=loc,
bbox_to_anchor=bbox_to_anchor,
bbox_transform=bbox_transform,
borderpad=borderpad)
inset_axes.set_axes_locator(axes_locator)
_add_inset_axes(parent_axes, inset_axes)
return inset_axes
@docstring.dedent_interpd
def mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):
"""
Draw a box to mark the location of an area represented by an inset axes.
This function draws a box in *parent_axes* at the bounding box of
*inset_axes*, and shows a connection with the inset axes by drawing lines
at the corners, giving a "zoomed in" effect.
Parameters
----------
parent_axes : `matplotlib.axes.Axes`
Axes which contains the area of the inset axes.
inset_axes : `matplotlib.axes.Axes`
The inset axes.
loc1, loc2 : {1, 2, 3, 4}
Corners to use for connecting the inset axes and the area in the
parent axes.
**kwargs
Patch properties for the lines and box drawn:
%(Patch)s
Returns
-------
pp : `matplotlib.patches.Patch`
The patch drawn to represent the area of the inset axes.
p1, p2 : `matplotlib.patches.Patch`
The patches connecting two corners of the inset axes and its area.
"""
rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)
pp = BboxPatch(rect, **kwargs)
parent_axes.add_patch(pp)
p1 = BboxConnector(inset_axes.bbox, rect, loc1=loc1, **kwargs)
inset_axes.add_patch(p1)
p1.set_clip_on(False)
p2 = BboxConnector(inset_axes.bbox, rect, loc1=loc2, **kwargs)
inset_axes.add_patch(p2)
p2.set_clip_on(False)
return pp, p1, p2
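# A minimal usage sketch combining zoomed_inset_axes and mark_inset; the zoom
# factor, data, axis limits, corner choices, and the helper name are arbitrary
# illustrations.
def _zoomed_inset_usage_sketch():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot(range(10), range(10))
    axins = zoomed_inset_axes(ax, zoom=2.5, loc=4)  # zoomed inset, lower right
    axins.plot(range(10), range(10))
    axins.set_xlim(2, 4)
    axins.set_ylim(2, 4)
    # draw the zoomed area in the parent axes and connect it to the inset
    mark_inset(ax, axins, loc1=2, loc2=3, fc="none", ec="0.5")
    return fig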
| mit |
saintdragon2/python-3-lecture-2015 | civil-final/1st_presentation/4조/civil0531.py | 1 | 2834 | __author__ = 'civil'
import tkinter as tk
import matplotlib
from tkinter import Tk, Menu, Toplevel, Button
from tkinter.filedialog import askopenfilename, asksaveasfile
from tkinter.messagebox import showerror
def donothing():
filewin = Toplevel(root)
button = Button(filewin, text="Do nothing button")
button.pack()
def open_cal():
cal_open = open('scott0012.py')
cal = cal_open.read()
print(cal)
def load_file():
fname = askopenfilename(filetypes=(("Template files", "*.tplate"),
("HTML files", "*.html;*.htm"),
("All files", "*.*"),
("Python files", "*.py")))
if fname:
try:
# print("""here it comes: self.settings["template"].set(fname)""")
print(fname)
text_file = open(fname)
text = text_file.read()
print(text)
except: # <- naked except is a bad idea
showerror("Open Source File", "Failed to read file\n'%s'" % fname)
return
def write_file():
file_to_write= asksaveasfile(mode='w', defaultextension=".txt")
    if file_to_write is None: # asksaveasfile returns `None` if dialog closed with "cancel".
return
# text2save = str(text.get(1.0, END)) # starts from `1.0`, not `0.0`
file_to_write.write('haha')
file_to_write.close() # `()` was missing
root = Tk()
menubar = Menu(root)
filemenu = Menu(menubar, tearoff=0)
filemenu.add_command(label='New', command=donothing)
filemenu.add_command(label='Open', command=load_file)
filemenu.add_command(label='Save', command=donothing)
filemenu.add_command(label='Save as ...', command=write_file)
filemenu.add_command(label='Close', command=donothing)
filemenu.add_separator()
filemenu.add_command(label='Exit', command=root.quit)
menubar.add_cascade(label='File', menu=filemenu)
editmenu = Menu(menubar, tearoff=0)
editmenu.add_command(label='Undo', command=donothing)
editmenu.add_separator()
editmenu.add_command(label='Cut', command=donothing)
editmenu.add_command(label='Copy', command=donothing)
editmenu.add_command(label='Paste', command=donothing)
editmenu.add_command(label='Delete', command=donothing)
editmenu.add_command(label='Select All', command=donothing)
menubar.add_cascade(label='Edit', menu=editmenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label='Help Index', command=donothing)
helpmenu.add_command(label='About ...', command=donothing)
menubar.add_cascade(label='Help', menu=helpmenu)
filewin = root
button = Button(filewin, text="graph")
button.place(x=50, y=50)
button2 = Button(filewin, text="calculator", command=open_cal)
button2.place(x=40, y=50)
button.pack()
button2.pack()
root.config()
root.config(menu=menubar)
root.mainloop() | mit |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/tests/util/test_hashing.py | 12 | 12908 | import pytest
import datetime
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas import DataFrame, Series, Index, MultiIndex
from pandas.util import hash_array, hash_pandas_object
from pandas.core.util.hashing import hash_tuples, hash_tuple, _hash_scalar
import pandas.util.testing as tm
class TestHashing(object):
def setup_method(self, method):
self.df = DataFrame(
{'i32': np.array([1, 2, 3] * 3, dtype='int32'),
'f32': np.array([None, 2.5, 3.5] * 3, dtype='float32'),
'cat': Series(['a', 'b', 'c'] * 3).astype('category'),
'obj': Series(['d', 'e', 'f'] * 3),
'bool': np.array([True, False, True] * 3),
'dt': Series(pd.date_range('20130101', periods=9)),
'dt_tz': Series(pd.date_range('20130101', periods=9,
tz='US/Eastern')),
'td': Series(pd.timedelta_range('2000', periods=9))})
def test_consistency(self):
# check that our hash doesn't change because of a mistake
# in the actual code; this is the ground truth
result = hash_pandas_object(Index(['foo', 'bar', 'baz']))
expected = Series(np.array([3600424527151052760, 1374399572096150070,
477881037637427054], dtype='uint64'),
index=['foo', 'bar', 'baz'])
tm.assert_series_equal(result, expected)
def test_hash_array(self):
for name, s in self.df.iteritems():
a = s.values
tm.assert_numpy_array_equal(hash_array(a), hash_array(a))
def test_hash_array_mixed(self):
result1 = hash_array(np.array([3, 4, 'All']))
result2 = hash_array(np.array(['3', '4', 'All']))
result3 = hash_array(np.array([3, 4, 'All'], dtype=object))
tm.assert_numpy_array_equal(result1, result2)
tm.assert_numpy_array_equal(result1, result3)
def test_hash_array_errors(self):
for val in [5, 'foo', pd.Timestamp('20130101')]:
pytest.raises(TypeError, hash_array, val)
def check_equal(self, obj, **kwargs):
a = hash_pandas_object(obj, **kwargs)
b = hash_pandas_object(obj, **kwargs)
tm.assert_series_equal(a, b)
kwargs.pop('index', None)
a = hash_pandas_object(obj, **kwargs)
b = hash_pandas_object(obj, **kwargs)
tm.assert_series_equal(a, b)
def check_not_equal_with_index(self, obj):
# check that we are not hashing the same if
# we include the index
if not isinstance(obj, Index):
a = hash_pandas_object(obj, index=True)
b = hash_pandas_object(obj, index=False)
if len(obj):
assert not (a == b).all()
def test_hash_tuples(self):
tups = [(1, 'one'), (1, 'two'), (2, 'one')]
result = hash_tuples(tups)
expected = hash_pandas_object(MultiIndex.from_tuples(tups)).values
tm.assert_numpy_array_equal(result, expected)
result = hash_tuples(tups[0])
assert result == expected[0]
def test_hash_tuple(self):
# test equivalence between hash_tuples and hash_tuple
for tup in [(1, 'one'), (1, np.nan), (1.0, pd.NaT, 'A'),
('A', pd.Timestamp("2012-01-01"))]:
result = hash_tuple(tup)
expected = hash_tuples([tup])[0]
assert result == expected
def test_hash_scalar(self):
for val in [1, 1.4, 'A', b'A', u'A', pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-01", tz='Europe/Brussels'),
datetime.datetime(2012, 1, 1),
pd.Timestamp("2012-01-01", tz='EST').to_pydatetime(),
pd.Timedelta('1 days'), datetime.timedelta(1),
pd.Period('2012-01-01', freq='D'), pd.Interval(0, 1),
np.nan, pd.NaT, None]:
result = _hash_scalar(val)
expected = hash_array(np.array([val], dtype=object),
categorize=True)
assert result[0] == expected[0]
def test_hash_tuples_err(self):
for val in [5, 'foo', pd.Timestamp('20130101')]:
pytest.raises(TypeError, hash_tuples, val)
def test_multiindex_unique(self):
mi = MultiIndex.from_tuples([(118, 472), (236, 118),
(51, 204), (102, 51)])
assert mi.is_unique
result = hash_pandas_object(mi)
assert result.is_unique
def test_multiindex_objects(self):
mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 2, 3]],
labels=[[0, 1, 0, 2], [2, 0, 0, 1]],
names=['col1', 'col2'])
recons = mi._sort_levels_monotonic()
# these are equal
assert mi.equals(recons)
assert Index(mi.values).equals(Index(recons.values))
# _hashed_values and hash_pandas_object(..., index=False)
# equivalency
expected = hash_pandas_object(
mi, index=False).values
result = mi._hashed_values
tm.assert_numpy_array_equal(result, expected)
expected = hash_pandas_object(
recons, index=False).values
result = recons._hashed_values
tm.assert_numpy_array_equal(result, expected)
expected = mi._hashed_values
result = recons._hashed_values
# values should match, but in different order
tm.assert_numpy_array_equal(np.sort(result),
np.sort(expected))
def test_hash_pandas_object(self):
for obj in [Series([1, 2, 3]),
Series([1.0, 1.5, 3.2]),
Series([1.0, 1.5, np.nan]),
Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),
Series(['a', 'b', 'c']),
Series(['a', np.nan, 'c']),
Series(['a', None, 'c']),
Series([True, False, True]),
Series(),
Index([1, 2, 3]),
Index([True, False, True]),
DataFrame({'x': ['a', 'b', 'c'], 'y': [1, 2, 3]}),
DataFrame(),
tm.makeMissingDataframe(),
tm.makeMixedDataFrame(),
tm.makeTimeDataFrame(),
tm.makeTimeSeries(),
tm.makeTimedeltaIndex(),
tm.makePeriodIndex(),
Series(tm.makePeriodIndex()),
Series(pd.date_range('20130101',
periods=3, tz='US/Eastern')),
MultiIndex.from_product(
[range(5),
['foo', 'bar', 'baz'],
pd.date_range('20130101', periods=2)]),
MultiIndex.from_product(
[pd.CategoricalIndex(list('aabc')),
range(3)])]:
self.check_equal(obj)
self.check_not_equal_with_index(obj)
def test_hash_pandas_object2(self):
for name, s in self.df.iteritems():
self.check_equal(s)
self.check_not_equal_with_index(s)
def test_hash_pandas_empty_object(self):
for obj in [Series([], dtype='float64'),
Series([], dtype='object'),
Index([])]:
self.check_equal(obj)
# these are by-definition the same with
# or w/o the index as the data is empty
def test_categorical_consistency(self):
# GH15143
# Check that categoricals hash consistent with their values, not codes
# This should work for categoricals of any dtype
for s1 in [Series(['a', 'b', 'c', 'd']),
Series([1000, 2000, 3000, 4000]),
Series(pd.date_range(0, periods=4))]:
s2 = s1.astype('category').cat.set_categories(s1)
s3 = s2.cat.set_categories(list(reversed(s1)))
for categorize in [True, False]:
# These should all hash identically
h1 = hash_pandas_object(s1, categorize=categorize)
h2 = hash_pandas_object(s2, categorize=categorize)
h3 = hash_pandas_object(s3, categorize=categorize)
tm.assert_series_equal(h1, h2)
tm.assert_series_equal(h1, h3)
def test_categorical_with_nan_consistency(self):
c = pd.Categorical.from_codes(
[-1, 0, 1, 2, 3, 4],
categories=pd.date_range('2012-01-01', periods=5, name='B'))
expected = hash_array(c, categorize=False)
c = pd.Categorical.from_codes(
[-1, 0],
categories=[pd.Timestamp('2012-01-01')])
result = hash_array(c, categorize=False)
assert result[0] in expected
assert result[1] in expected
def test_pandas_errors(self):
for obj in [pd.Timestamp('20130101')]:
with pytest.raises(TypeError):
hash_pandas_object(obj)
with catch_warnings(record=True):
obj = tm.makePanel()
with pytest.raises(TypeError):
hash_pandas_object(obj)
def test_hash_keys(self):
# using different hash keys, should have different hashes
# for the same data
# this only matters for object dtypes
obj = Series(list('abc'))
a = hash_pandas_object(obj, hash_key='9876543210123456')
b = hash_pandas_object(obj, hash_key='9876543210123465')
assert (a != b).all()
def test_invalid_key(self):
# this only matters for object dtypes
def f():
hash_pandas_object(Series(list('abc')), hash_key='foo')
pytest.raises(ValueError, f)
    def test_already_encoded(self):
# if already encoded then ok
obj = Series(list('abc')).str.encode('utf8')
self.check_equal(obj)
def test_alternate_encoding(self):
obj = Series(list('abc'))
self.check_equal(obj, encoding='ascii')
def test_same_len_hash_collisions(self):
for l in range(8):
length = 2**(l + 8) + 1
s = tm.rands_array(length, 2)
result = hash_array(s, 'utf8')
assert not result[0] == result[1]
for l in range(8):
length = 2**(l + 8)
s = tm.rands_array(length, 2)
result = hash_array(s, 'utf8')
assert not result[0] == result[1]
def test_hash_collisions(self):
# hash collisions are bad
# https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726
L = ['Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9', # noqa
'Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe'] # noqa
# these should be different!
result1 = hash_array(np.asarray(L[0:1], dtype=object), 'utf8')
expected1 = np.array([14963968704024874985], dtype=np.uint64)
tm.assert_numpy_array_equal(result1, expected1)
result2 = hash_array(np.asarray(L[1:2], dtype=object), 'utf8')
expected2 = np.array([16428432627716348016], dtype=np.uint64)
tm.assert_numpy_array_equal(result2, expected2)
result = hash_array(np.asarray(L, dtype=object), 'utf8')
tm.assert_numpy_array_equal(
result, np.concatenate([expected1, expected2], axis=0))
def test_deprecation():
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
from pandas.tools.hashing import hash_pandas_object
obj = Series(list('abc'))
hash_pandas_object(obj, hash_key='9876543210123456')
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
from pandas.tools.hashing import hash_array
obj = np.array([1, 2, 3])
hash_array(obj, hash_key='9876543210123456')
| mit |
Odingod/mne-python | mne/viz/evoked.py | 6 | 25091 | """Functions to make simple plots of evoked M/EEG data (besides topographies)
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Cathy Nangini <[email protected]>
# Mainak Jas <[email protected]>
#
# License: Simplified BSD
from itertools import cycle
import numpy as np
from ..io.pick import channel_type, pick_types, _picks_by_type
from ..externals.six import string_types
from ..defaults import _handle_default
from .utils import _draw_proj_checkbox, tight_layout, _check_delayed_ssp
from ..utils import logger
from ..fixes import partial
from ..io.pick import pick_info
def _butterfly_onpick(event, params):
"""Helper to add a channel name on click"""
params['need_draw'] = True
ax = event.artist.get_axes()
ax_idx = np.where([ax is a for a in params['axes']])[0][0]
lidx = np.where([l is event.artist for l in params['lines'][ax_idx]])[0][0]
ch_name = params['ch_names'][params['idxs'][ax_idx][lidx]]
text = params['texts'][ax_idx]
x = event.artist.get_xdata()[event.ind[0]]
y = event.artist.get_ydata()[event.ind[0]]
text.set_x(x)
text.set_y(y)
text.set_text(ch_name)
text.set_color(event.artist.get_color())
text.set_alpha(1.)
text.set_path_effects(params['path_effects'])
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use on_button_press (happens once per click)
# to do the drawing
def _butterfly_on_button_press(event, params):
"""Helper to only draw once for picking"""
if params['need_draw']:
event.canvas.draw()
else:
idx = np.where([event.inaxes is ax for ax in params['axes']])[0]
if len(idx) == 1:
text = params['texts'][idx[0]]
text.set_alpha(0.)
text.set_path_effects([])
event.canvas.draw()
params['need_draw'] = False
def _plot_evoked(evoked, picks, exclude, unit, show,
ylim, proj, xlim, hline, units,
scalings, titles, axes, plot_type,
cmap=None):
"""Aux function for plot_evoked and plot_evoked_image (cf. docstrings)
Extra param is:
plot_type : str, value ('butterfly' | 'image')
The type of graph to plot: 'butterfly' plots each channel as a line
(x axis: time, y axis: amplitude). 'image' plots a 2D image where
color depicts the amplitude of each channel at a given time point
(x axis: time, y axis: channel). In 'image' mode, the plot is not
interactive.
"""
import matplotlib.pyplot as plt
from matplotlib import patheffects
if axes is not None and proj == 'interactive':
raise RuntimeError('Currently only single axis figures are supported'
' for interactive SSP selection.')
scalings = _handle_default('scalings', scalings)
titles = _handle_default('titles', titles)
units = _handle_default('units', units)
channel_types = set(key for d in [scalings, titles, units] for key in d)
channel_types = sorted(channel_types) # to guarantee consistent order
if picks is None:
picks = list(range(evoked.info['nchan']))
bad_ch_idx = [evoked.ch_names.index(ch) for ch in evoked.info['bads']
if ch in evoked.ch_names]
if len(exclude) > 0:
if isinstance(exclude, string_types) and exclude == 'bads':
exclude = bad_ch_idx
elif (isinstance(exclude, list) and
all(isinstance(ch, string_types) for ch in exclude)):
exclude = [evoked.ch_names.index(ch) for ch in exclude]
else:
raise ValueError('exclude has to be a list of channel names or '
'"bads"')
picks = list(set(picks).difference(exclude))
picks = np.array(picks)
types = np.array([channel_type(evoked.info, idx) for idx in picks])
n_channel_types = 0
ch_types_used = []
for t in channel_types:
if t in types:
n_channel_types += 1
ch_types_used.append(t)
axes_init = axes # remember if axes where given as input
fig = None
if axes is None:
fig, axes = plt.subplots(n_channel_types, 1)
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if axes_init is not None:
fig = axes[0].get_figure()
if not len(axes) == n_channel_types:
raise ValueError('Number of axes (%g) must match number of channel '
'types (%g)' % (len(axes), n_channel_types))
# instead of projecting during each iteration let's use the mixin here.
if proj is True and evoked.proj is not True:
evoked = evoked.copy()
evoked.apply_proj()
    times = 1e3 * evoked.times  # time in milliseconds
texts = []
idxs = []
lines = []
path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)]
for ax, t in zip(axes, ch_types_used):
ch_unit = units[t]
this_scaling = scalings[t]
if unit is False:
this_scaling = 1.0
ch_unit = 'NA' # no unit
idx = list(picks[types == t])
idxs.append(idx)
if len(idx) > 0:
# Parameters for butterfly interactive plots
if plot_type == 'butterfly':
if any(i in bad_ch_idx for i in idx):
colors = ['k'] * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = 'r'
ax._get_lines.color_cycle = iter(colors)
else:
ax._get_lines.color_cycle = cycle(['k'])
# Set amplitude scaling
D = this_scaling * evoked.data[idx, :]
if plot_type == 'butterfly':
lines.append(ax.plot(times, D.T, picker=3., zorder=0))
for ii, line in zip(idx, lines[-1]):
if ii in bad_ch_idx:
line.set_zorder(1)
ax.set_ylabel('data (%s)' % ch_unit)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(ax.text(0, 0, 'blank', zorder=2,
verticalalignment='baseline',
horizontalalignment='left',
fontweight='bold', alpha=0))
elif plot_type == 'image':
im = ax.imshow(D, interpolation='nearest', origin='lower',
extent=[times[0], times[-1], 0, D.shape[0]],
aspect='auto', cmap=cmap)
cbar = plt.colorbar(im, ax=ax)
cbar.ax.set_title(ch_unit)
ax.set_ylabel('channels (%s)' % 'index')
else:
raise ValueError("plot_type has to be 'butterfly' or 'image'."
"Got %s." % plot_type)
if xlim is not None:
if xlim == 'tight':
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and t in ylim:
if plot_type == 'butterfly':
ax.set_ylim(ylim[t])
elif plot_type == 'image':
im.set_clim(ylim[t])
ax.set_title(titles[t] + ' (%d channel%s)' % (
len(D), 's' if len(D) > 1 else ''))
ax.set_xlabel('time (ms)')
if (plot_type == 'butterfly') and (hline is not None):
for h in hline:
ax.axhline(h, color='r', linestyle='--', linewidth=2)
if plot_type == 'butterfly':
params = dict(axes=axes, texts=texts, lines=lines,
ch_names=evoked.ch_names, idxs=idxs, need_draw=False,
path_effects=path_effects)
fig.canvas.mpl_connect('pick_event',
partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect('button_press_event',
partial(_butterfly_on_button_press,
params=params))
if axes_init is None:
plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
if proj == 'interactive':
_check_delayed_ssp(evoked)
params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
axes=axes, types=types, units=units, scalings=scalings,
unit=unit, ch_types_used=ch_types_used, picks=picks,
plot_update_proj_callback=_plot_update_evoked,
plot_type=plot_type)
_draw_proj_checkbox(None, params)
if show and plt.get_backend() != 'agg':
plt.show()
fig.canvas.draw() # for axes plots update axes.
tight_layout(fig=fig)
return fig
def plot_evoked(evoked, picks=None, exclude='bads', unit=True, show=True,
ylim=None, xlim='tight', proj=False, hline=None, units=None,
scalings=None, titles=None, axes=None):
"""Plot evoked data
Note: If bad channels are not excluded they are shown in red.
Parameters
----------
evoked : instance of Evoked
The evoked data
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
ylim : dict | None
        ylim for plots. e.g. ylim = dict(eeg=[-200e-6, 200e-6])
Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
hline : list of floats | None
        The values at which to show a horizontal line.
    units : dict | None
        The units of the channel types used for axis labels. If None,
        defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
    scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axis | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
"""
return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
show=show, ylim=ylim, proj=proj, xlim=xlim,
hline=hline, units=units, scalings=scalings,
titles=titles, axes=axes, plot_type="butterfly")
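# A minimal usage sketch for plot_evoked: a butterfly plot with bad channels
# excluded and SSP applied. The helper name and the ylim value are illustrative
# assumptions; `evoked` is assumed to be an Evoked instance supplied by the
# caller.
def _plot_evoked_usage_sketch(evoked):
    return plot_evoked(evoked, exclude='bads', proj=True,
                       ylim=dict(eeg=[-20, 20]))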
def plot_evoked_image(evoked, picks=None, exclude='bads', unit=True, show=True,
clim=None, xlim='tight', proj=False, units=None,
scalings=None, titles=None, axes=None, cmap='RdBu_r'):
"""Plot evoked data as images
Parameters
----------
evoked : instance of Evoked
The evoked data
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
clim : dict | None
        clim for plots. e.g. clim = dict(eeg=[-200e-6, 200e-6])
Valid keys are eeg, mag, grad, misc. If None, the clim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
units : dict | None
        The units of the channel types used for axis labels. If None,
        defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
    scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axis | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
cmap : matplotlib colormap
Colormap.
"""
return _plot_evoked(evoked=evoked, picks=picks, exclude=exclude, unit=unit,
show=show, ylim=clim, proj=proj, xlim=xlim,
hline=None, units=units, scalings=scalings,
titles=titles, axes=axes, plot_type="image",
cmap=cmap)
def _plot_update_evoked(params, bools):
""" update the plot evoked lines
"""
picks, evoked = [params[k] for k in ('picks', 'evoked')]
times = evoked.times * 1e3
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
new_evoked = evoked.copy()
new_evoked.info['projs'] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
for ax, t in zip(params['axes'], params['ch_types_used']):
this_scaling = params['scalings'][t]
idx = [picks[i] for i in range(len(picks)) if params['types'][i] == t]
D = this_scaling * new_evoked.data[idx, :]
if params['plot_type'] == 'butterfly':
[line.set_data(times, di) for line, di in zip(ax.lines, D)]
else:
ax.images[0].set_data(D)
params['fig'].canvas.draw()
def plot_evoked_white(evoked, noise_cov, show=True):
"""Plot whitened evoked response
Plots the whitened evoked response and the whitened GFP as described in
[1]. If one single covariance object is passed, the GFP panel (bottom)
will depict different sensor types. If multiple covariance objects are
passed as a list, the left column will display the whitened evoked
responses for each channel based on the whitener from the noise covariance
    that has the highest log-likelihood. The right column will depict the
whitened GFPs based on each estimator separately for each sensor type.
    Instead of the number of channels, the GFP display shows the estimated rank.
Note. The rank estimation will be printed by the logger for each noise
covariance estimator that is passed.
Parameters
----------
evoked : instance of mne.Evoked
The evoked response.
noise_cov : list | instance of Covariance
The noise covariance as computed by ``mne.cov.compute_covariance``.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
References
----------
[1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG signals,
vol. 108, 328-342, NeuroImage.
"""
return _plot_evoked_white(evoked=evoked, noise_cov=noise_cov,
scalings=None, rank=None, show=show)
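# A minimal usage sketch for plot_evoked_white: compare whitening across one
# or several noise covariance estimates. The helper name is an illustrative
# assumption; `evoked` and `noise_covs` (a Covariance or a list of candidate
# Covariance objects, e.g. as could be obtained from
# mne.cov.compute_covariance with return_estimators=True) are assumed to be
# supplied by the caller.
def _plot_evoked_white_usage_sketch(evoked, noise_covs):
    return plot_evoked_white(evoked, noise_covs)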
def _plot_evoked_white(evoked, noise_cov, scalings=None, rank=None, show=True):
"""helper to plot_evoked_white
    Additional Parameters
    ---------------------
scalings : dict | None
The rescaling method to be applied to improve the accuracy of rank
        estimation. If dict, it will override the following default values
(used if None):
dict(mag=1e12, grad=1e11, eeg=1e5)
        Note. These values were tested on different datasets across various
conditions. You should not need to update them.
rank : dict of int | None
Dict of ints where keys are 'eeg', 'mag' or 'grad'. If None,
the rank is detected automatically. Defaults to None. Note.
The rank estimation will be printed by the logger for each noise
covariance estimator that is passed.
"""
from ..cov import whiten_evoked # recursive import
from ..cov import _estimate_rank_meeg_cov
import matplotlib.pyplot as plt
if scalings is None:
scalings = dict(mag=1e12, grad=1e11, eeg=1e5)
ch_used = [ch for ch in ['eeg', 'grad', 'mag'] if ch in evoked]
has_meg = 'mag' in ch_used and 'grad' in ch_used
if not isinstance(noise_cov, (list, tuple)):
noise_cov = [noise_cov]
proc_history = evoked.info.get('proc_history', [])
has_sss = False
if len(proc_history) > 0:
        # if SSS has been applied, mags and grads are no longer independent
# for correct display of the whitening we will drop the cross-terms
# (the gradiometer * magnetometer covariance)
has_sss = 'max_info' in proc_history[0] and has_meg
if has_sss:
logger.info('SSS has been applied to data. Showing mag and grad '
'whitening jointly.')
evoked = evoked.copy() # handle ref meg
evoked.info['projs'] = [] # either applied already or not-- else issue
picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
evoked.pick_channels([evoked.ch_names[k] for k in picks], copy=False)
# important to re-pick. will otherwise crash on systems with ref channels
# as first sensor block
picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
picks_list = _picks_by_type(evoked.info, meg_combined=has_sss)
if has_meg and has_sss:
# reduce ch_used to combined mag grad
ch_used = list(zip(*picks_list))[0]
# order pick list by ch_used (required for compat with plot_evoked)
picks_list = [x for x, y in sorted(zip(picks_list, ch_used))]
n_ch_used = len(ch_used)
# make sure we use the same rank estimates for GFP and whitening
rank_list = []
for cov in noise_cov:
rank_ = {}
C = cov['data'].copy()
picks_list2 = [k for k in picks_list]
if rank is None:
if has_meg and not has_sss:
picks_list2 += _picks_by_type(evoked.info,
meg_combined=True)
for ch_type, this_picks in picks_list2:
this_info = pick_info(evoked.info, this_picks)
idx = np.ix_(this_picks, this_picks)
this_rank = _estimate_rank_meeg_cov(C[idx], this_info,
scalings)
rank_[ch_type] = this_rank
if rank is not None:
rank_.update(rank)
rank_list.append(rank_)
evokeds_white = [whiten_evoked(evoked, n, picks, rank=r)
for n, r in zip(noise_cov, rank_list)]
axes_evoked = None
def whitened_gfp(x, rank=None):
"""Whitened Global Field Power
The MNE inverse solver assumes zero mean whitened data as input.
Therefore, a chi^2 statistic will be best to detect model violations.
"""
return np.sum(x ** 2, axis=0) / (len(x) if rank is None else rank)
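    # Added note: for well-whitened data each channel is approximately
    # zero-mean with unit variance, so the rank-normalized GFP above should
    # fluctuate around 1; values well above 1 indicate model violations,
    # which is why a horizontal reference line at 1 is drawn further below.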
# prepare plot
if len(noise_cov) > 1:
n_columns = 2
n_extra_row = 0
else:
n_columns = 1
n_extra_row = 1
n_rows = n_ch_used + n_extra_row
fig, axes = plt.subplots(n_rows,
n_columns, sharex=True, sharey=False,
figsize=(8.8, 2.2 * n_rows))
if n_columns > 1:
suptitle = ('Whitened evoked (left, best estimator = "%s")\n'
'and global field power '
'(right, comparison of estimators)' %
noise_cov[0].get('method', 'empirical'))
fig.suptitle(suptitle)
ax_gfp = None
if any(((n_columns == 1 and n_ch_used == 1),
(n_columns == 1 and n_ch_used > 1),
(n_columns == 2 and n_ch_used == 1))):
axes_evoked = axes[:n_ch_used]
ax_gfp = axes[-1:]
elif n_columns == 2 and n_ch_used > 1:
axes_evoked = axes[:n_ch_used, 0]
ax_gfp = axes[:, 1]
else:
raise RuntimeError('Wrong axes inputs')
times = evoked.times * 1e3
titles_ = _handle_default('titles')
if has_sss:
titles_['meg'] = 'MEG (combined)'
colors = [plt.cm.Set1(i) for i in np.linspace(0, 0.5, len(noise_cov))]
ch_colors = {'eeg': 'black', 'mag': 'blue', 'grad': 'cyan',
'meg': 'steelblue'}
iter_gfp = zip(evokeds_white, noise_cov, rank_list, colors)
if not has_sss:
evokeds_white[0].plot(unit=False, axes=axes_evoked,
hline=[-1.96, 1.96], show=False)
else:
for ((ch_type, picks), ax) in zip(picks_list, axes_evoked):
ax.plot(times, evokeds_white[0].data[picks].T, color='k')
for hline in [-1.96, 1.96]:
ax.axhline(hline, color='red', linestyle='--')
# Now plot the GFP
for evoked_white, noise_cov, rank_, color in iter_gfp:
i = 0
for ch, sub_picks in picks_list:
this_rank = rank_[ch]
title = '{0} ({2}{1})'.format(
titles_[ch] if n_columns > 1 else ch,
this_rank, 'rank ' if n_columns > 1 else '')
label = noise_cov.get('method', 'empirical')
ax_gfp[i].set_title(title if n_columns > 1 else
'whitened global field power (GFP),'
' method = "%s"' % label)
data = evoked_white.data[sub_picks]
gfp = whitened_gfp(data, rank=this_rank)
ax_gfp[i].plot(times, gfp,
label=(label if n_columns > 1 else title),
color=color if n_columns > 1 else ch_colors[ch])
ax_gfp[i].set_xlabel('times [ms]')
ax_gfp[i].set_ylabel('GFP [chi^2]')
ax_gfp[i].set_xlim(times[0], times[-1])
ax_gfp[i].set_ylim(0, 10)
ax_gfp[i].axhline(1, color='red', linestyle='--')
if n_columns > 1:
i += 1
ax = ax_gfp[0]
if n_columns == 1:
ax.legend( # mpl < 1.2.1 compatibility: use prop instead of fontsize
loc='upper right', bbox_to_anchor=(0.98, 0.9), prop=dict(size=12))
else:
ax.legend(loc='upper right', prop=dict(size=10))
params = dict(top=[0.69, 0.82, 0.87][n_rows - 1],
bottom=[0.22, 0.13, 0.09][n_rows - 1])
if has_sss:
params['hspace'] = 0.49
fig.subplots_adjust(**params)
fig.canvas.draw()
if show is True:
plt.show()
return fig
def plot_snr_estimate(evoked, inv, show=True):
"""Plot a data SNR estimate
Parameters
----------
evoked : instance of Evoked
The evoked instance. This should probably be baseline-corrected.
inv : instance of InverseOperator
The minimum-norm inverse operator.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object containing the plot.
Notes
-----
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
from ..minimum_norm import estimate_snr
snr, snr_est = estimate_snr(evoked, inv, verbose=True)
fig, ax = plt.subplots(1, 1)
lims = np.concatenate([evoked.times[[0, -1]], [-1, snr_est.max()]])
ax.plot([0, 0], lims[2:], 'k:')
ax.plot(lims[:2], [0, 0], 'k:')
# Colors are "bluish green" and "vermillion" taken from:
# http://bconnelly.net/2013/10/creating-colorblind-friendly-figures/
ax.plot(evoked.times, snr_est, color=[0.0, 0.6, 0.5])
ax.plot(evoked.times, snr, color=[0.8, 0.4, 0.0])
ax.set_xlim(lims[:2])
ax.set_ylim(lims[2:])
ax.set_ylabel('SNR')
ax.set_xlabel('Time (sec)')
if evoked.comment is not None:
ax.set_title(evoked.comment)
plt.draw()
if show:
plt.show()
return fig
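# A hedged usage sketch (file names and the chosen condition are placeholders,
# not part of this module):
#
#     from mne import read_evokeds
#     from mne.minimum_norm import read_inverse_operator
#     evoked = read_evokeds('sample-ave.fif', condition=0,
#                           baseline=(None, 0))
#     inv = read_inverse_operator('sample-inv.fif')
#     fig = plot_snr_estimate(evoked, inv)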
| bsd-3-clause |
haowu4682/gem5 | util/stats/output.py | 90 | 7981 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from chart import ChartOptions
class StatOutput(ChartOptions):
def __init__(self, jobfile, info, stat=None):
super(StatOutput, self).__init__()
self.jobfile = jobfile
self.stat = stat
self.invert = False
self.info = info
def display(self, name, printmode = 'G'):
import info
if printmode == 'G':
    valformat = '%g'
elif printmode == 'F':
    valformat = '%f'
else:
    # the original test compared an undefined `value`; fall back to
    # scientific notation for other print modes
    valformat = '%0.5e'
for job in self.jobfile.jobs():
value = self.info.get(job, self.stat)
if value is None:
return
if not isinstance(value, list):
value = [ value ]
if self.invert:
for i,val in enumerate(value):
if val != 0.0:
value[i] = 1 / val
valstring = ', '.join([ valformat % val for val in value ])
print '%-50s %s' % (job.name + ':', valstring)
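    # Hypothetical usage of display() (the jobfile/info objects and the
    # 'ipc' stat name are assumptions for illustration only):
    #
    #     output = StatOutput(jobfile, info, stat='ipc')
    #     output.display('ipc', printmode='G')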
def graph(self, name, graphdir, proxy=None):
from os.path import expanduser, isdir, join as joinpath
from barchart import BarChart
from matplotlib.numerix import Float, array, zeros
import os, re, urllib
from jobfile import crossproduct
confgroups = self.jobfile.groups()
ngroups = len(confgroups)
skiplist = [ False ] * ngroups
groupopts = []
baropts = []
groups = []
for i,group in enumerate(confgroups):
if group.flags.graph_group:
groupopts.append(group.subopts())
skiplist[i] = True
elif group.flags.graph_bars:
baropts.append(group.subopts())
skiplist[i] = True
else:
groups.append(group)
has_group = bool(groupopts)
if has_group:
groupopts = [ group for group in crossproduct(groupopts) ]
else:
groupopts = [ None ]
if baropts:
baropts = [ bar for bar in crossproduct(baropts) ]
else:
raise AttributeError, 'No group selected for graph bars'
directory = expanduser(graphdir)
if not isdir(directory):
os.mkdir(directory)
html = file(joinpath(directory, '%s.html' % name), 'w')
print >>html, '<html>'
print >>html, '<title>Graphs for %s</title>' % name
print >>html, '<body>'
html.flush()
for options in self.jobfile.options(groups):
chart = BarChart(self)
data = [ [ None ] * len(baropts) for i in xrange(len(groupopts)) ]
enabled = False
stacked = 0
for g,gopt in enumerate(groupopts):
for b,bopt in enumerate(baropts):
if gopt is None:
gopt = []
job = self.jobfile.job(options + gopt + bopt)
if not job:
continue
if proxy:
import db
proxy.dict['system'] = self.info[job.system]
val = self.info.get(job, self.stat)
if val is None:
print 'stat "%s" for job "%s" not found' % \
(self.stat, job)
if isinstance(val, (list, tuple)):
if len(val) == 1:
val = val[0]
else:
stacked = len(val)
data[g][b] = val
if stacked == 0:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
if data[i][j] is None:
data[i][j] = 0.0
else:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
val = data[i][j]
if val is None:
data[i][j] = [ 0.0 ] * stacked
elif len(val) != stacked:
raise ValueError, "some stats stacked, some not"
data = array(data)
if data.sum() == 0:
continue
dim = len(data.shape)
x = data.shape[0]
xkeep = [ i for i in xrange(x) if data[i].sum() != 0 ]
y = data.shape[1]
ykeep = [ i for i in xrange(y) if data[:,i].sum() != 0 ]
data = data.take(xkeep, axis=0)
data = data.take(ykeep, axis=1)
if not has_group:
data = data.take([ 0 ], axis=0)
chart.data = data
bopts = [ baropts[i] for i in ykeep ]
bdescs = [ ' '.join([o.desc for o in opt]) for opt in bopts]
if has_group:
gopts = [ groupopts[i] for i in xkeep ]
gdescs = [ ' '.join([o.desc for o in opt]) for opt in gopts]
if chart.legend is None:
if stacked:
try:
chart.legend = self.info.rcategories
except:
chart.legend = [ str(i) for i in xrange(stacked) ]
else:
chart.legend = bdescs
if chart.xticks is None:
if has_group:
chart.xticks = gdescs
else:
chart.xticks = []
chart.graph()
names = [ opt.name for opt in options ]
descs = [ opt.desc for opt in options ]
if names[0] == 'run':
names = names[1:]
descs = descs[1:]
basename = '%s-%s' % (name, ':'.join(names))
desc = ' '.join(descs)
pngname = '%s.png' % basename
psname = '%s.ps' % re.sub(':', '-', basename)
epsname = '%s.eps' % re.sub(':', '-', basename)
chart.savefig(joinpath(directory, pngname))
chart.savefig(joinpath(directory, epsname))
chart.savefig(joinpath(directory, psname))
html_name = urllib.quote(pngname)
print >>html, '''%s<br><img src="%s"><br>''' % (desc, html_name)
html.flush()
print >>html, '</body>'
print >>html, '</html>'
html.close()
| bsd-3-clause |
tammoippen/nest-simulator | pynest/examples/gif_population.py | 13 | 4003 | # -*- coding: utf-8 -*-
#
# gif_population.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Population of GIF neuron model with oscillatory behavior
--------------------------------------------------------
This script simulates a population of generalized integrate-and-fire (GIF)
model neurons driven by noise from a group of Poisson generators.
Due to spike-frequency adaptation, the GIF neurons tend to show oscillatory
behavior on a time scale comparable with the time constants of the adaptation
elements (stc and sfa).
Population dynamics are visualized by a raster plot and the average firing rate.
'''
'''
Import all necessary modules for simulation and plotting.
'''
import nest
import nest.raster_plot
import matplotlib.pyplot as plt
nest.ResetKernel()
'''
Assigning the simulation parameters to variables.
'''
dt = 0.1
simtime = 2000.0
'''
Definition of neural parameters for the GIF model. These parameters are
extracted by fitting the model to experimental data: Mensi, S., Naud, R.,
Pozzorini, C., Avermann, M., Petersen, C.C. and Gerstner, W., 2012. Parameter
extraction and classification of three cortical neuron types reveals two
distinct adaptation mechanisms. Journal of Neurophysiology, 107(6),
pp.1756-1775.
'''
neuron_params = {"C_m": 83.1,
"g_L": 3.7,
"E_L": -67.0,
"Delta_V": 1.4,
"V_T_star": -39.6,
"t_ref": 4.0,
"V_reset": -36.7,
"lambda_0": 1.0,
"q_stc": [56.7, -6.9],
"tau_stc": [57.8, 218.2],
"q_sfa": [11.7, 1.8],
"tau_sfa": [53.8, 640.0],
"tau_syn_ex": 10.0,
}
'''
Definition of the parameters for the population of GIF neurons.
'''
N_ex = 100 # size of the population
p_ex = 0.3 # connection probability inside the population
w_ex = 30.0 # synaptic weights inside the population (pA)
'''
Definition of the parameters for the Poisson group and its connection with GIF
neurons population.
'''
N_noise = 50 # size of Poisson group
rate_noise = 10.0 # firing rate of Poisson neurons (Hz)
w_noise = 20.0 # synaptic weights from Poisson to population neurons (pA)
'''
Configuration of the simulation kernel with the previously defined time
resolution.
'''
nest.SetKernelStatus({"resolution": dt})
'''
Building a population of GIF neurons, a group of Poisson neurons and a
spike detector device for capturing spike times of the population.
'''
population = nest.Create("gif_psc_exp", N_ex, params=neuron_params)
noise = nest.Create("poisson_generator", N_noise, params={'rate': rate_noise})
spike_det = nest.Create("spike_detector")
'''
Build connections within the population of GIF neurons, between the
Poisson group and the population, and from the population to the spike
detector.
'''
nest.Connect(
population, population, {'rule': 'pairwise_bernoulli', 'p': p_ex},
syn_spec={"weight": w_ex}
)
nest.Connect(noise, population, 'all_to_all', syn_spec={"weight": w_noise})
nest.Connect(population, spike_det)
'''
Simulation of the network.
'''
nest.Simulate(simtime)
'''
Plotting the results of simulation including raster plot and histogram of
population activity.
'''
nest.raster_plot.from_device(spike_det, hist=True)
plt.title('Population dynamics')
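# Added so the figure appears when the script is run outside an interactive
# matplotlib session.
plt.show()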
| gpl-2.0 |
thetomcraig/redwood | examples/gui/wx_mpl_dynamic_graph.py.py | 13 | 11139 | """
This demo shows how to draw a dynamic mpl (matplotlib)
plot in a wxPython application.
It allows "live" plotting as well as manual zooming to specific
regions.
Both X and Y axes allow "auto" or "manual" settings. For Y, auto
mode sets the scaling of the graph to see all the data points.
For X, auto mode makes the graph "follow" the data. Set X min
to manual 0 to always see the whole data from the beginning.
Note: press Enter in the 'manual' text box to make a new value
affect the plot.
Eli Bendersky ([email protected])
License: this code is in the public domain
Last modified: 31.07.2008
"""
import os
import pprint
import random
import sys
import wx
# The recommended way to use wx with mpl is with the WXAgg
# backend.
#
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import numpy as np
import pylab
class DataGen(object):
""" A silly class that generates pseudo-random data for
display in the plot.
"""
def __init__(self, init=50):
self.data = self.init = init
def next(self):
self._recalc_data()
return self.data
def _recalc_data(self):
delta = random.uniform(-0.5, 0.5)
r = random.random()
if r > 0.9:
self.data += delta * 15
elif r > 0.8:
# attraction to the initial value
delta += (0.5 if self.init > self.data else -0.5)
self.data += delta
else:
self.data += delta
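# Illustrative random-walk behaviour of DataGen (not used by the GUI; the
# numbers shown are only an example of possible output):
#
#     gen = DataGen(init=50)
#     samples = [gen.next() for _ in range(5)]  # e.g. [50.3, 50.1, 49.8, ...]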
class BoundControlBox(wx.Panel):
""" A static box with a couple of radio buttons and a text
box. Allows to switch between an automatic mode and a
manual mode with an associated value.
"""
def __init__(self, parent, ID, label, initval):
wx.Panel.__init__(self, parent, ID)
self.value = initval
box = wx.StaticBox(self, -1, label)
sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
self.radio_auto = wx.RadioButton(self, -1,
label="Auto", style=wx.RB_GROUP)
self.radio_manual = wx.RadioButton(self, -1,
label="Manual")
self.manual_text = wx.TextCtrl(self, -1,
size=(35,-1),
value=str(initval),
style=wx.TE_PROCESS_ENTER)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_manual_text, self.manual_text)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.manual_text)
manual_box = wx.BoxSizer(wx.HORIZONTAL)
manual_box.Add(self.radio_manual, flag=wx.ALIGN_CENTER_VERTICAL)
manual_box.Add(self.manual_text, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.radio_auto, 0, wx.ALL, 10)
sizer.Add(manual_box, 0, wx.ALL, 10)
self.SetSizer(sizer)
sizer.Fit(self)
def on_update_manual_text(self, event):
self.manual_text.Enable(self.radio_manual.GetValue())
def on_text_enter(self, event):
self.value = self.manual_text.GetValue()
def is_auto(self):
return self.radio_auto.GetValue()
def manual_value(self):
return self.value
class GraphFrame(wx.Frame):
""" The main frame of the application
"""
title = 'Demo: dynamic matplotlib graph'
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.datagen = DataGen()
self.data = [self.datagen.next()]
self.paused = False
self.create_menu()
self.create_status_bar()
self.create_main_panel()
self.redraw_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
self.redraw_timer.Start(100)
def create_menu(self):
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_expt = menu_file.Append(-1, "&Save plot\tCtrl-S", "Save plot to file")
self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-X", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
self.menubar.Append(menu_file, "&File")
self.SetMenuBar(self.menubar)
def create_main_panel(self):
self.panel = wx.Panel(self)
self.init_plot()
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.xmin_control = BoundControlBox(self.panel, -1, "X min", 0)
self.xmax_control = BoundControlBox(self.panel, -1, "X max", 50)
self.ymin_control = BoundControlBox(self.panel, -1, "Y min", 0)
self.ymax_control = BoundControlBox(self.panel, -1, "Y max", 100)
self.pause_button = wx.Button(self.panel, -1, "Pause")
self.Bind(wx.EVT_BUTTON, self.on_pause_button, self.pause_button)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_pause_button, self.pause_button)
self.cb_grid = wx.CheckBox(self.panel, -1,
"Show Grid",
style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, self.on_cb_grid, self.cb_grid)
self.cb_grid.SetValue(True)
self.cb_xlab = wx.CheckBox(self.panel, -1,
"Show X labels",
style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, self.on_cb_xlab, self.cb_xlab)
self.cb_xlab.SetValue(True)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.Add(self.pause_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(20)
self.hbox1.Add(self.cb_grid, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.cb_xlab, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox2.Add(self.xmin_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.xmax_control, border=5, flag=wx.ALL)
self.hbox2.AddSpacer(24)
self.hbox2.Add(self.ymin_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.ymax_control, border=5, flag=wx.ALL)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, flag=wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.hbox1, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.vbox.Add(self.hbox2, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def create_status_bar(self):
self.statusbar = self.CreateStatusBar()
def init_plot(self):
self.dpi = 100
self.fig = Figure((3.0, 3.0), dpi=self.dpi)
self.axes = self.fig.add_subplot(111)
self.axes.set_axis_bgcolor('black')
self.axes.set_title('Very important random data', size=12)
pylab.setp(self.axes.get_xticklabels(), fontsize=8)
pylab.setp(self.axes.get_yticklabels(), fontsize=8)
# plot the data as a line series, and save the reference
# to the plotted line series
#
self.plot_data = self.axes.plot(
self.data,
linewidth=1,
color=(1, 1, 0),
)[0]
def draw_plot(self):
""" Redraws the plot
"""
# when xmin is on auto, it "follows" xmax to produce a
# sliding window effect. therefore, xmin is assigned after
# xmax.
#
if self.xmax_control.is_auto():
xmax = len(self.data) if len(self.data) > 50 else 50
else:
xmax = int(self.xmax_control.manual_value())
if self.xmin_control.is_auto():
xmin = xmax - 50
else:
xmin = int(self.xmin_control.manual_value())
# for ymin and ymax, find the minimal and maximal values
# in the data set and add a minimal margin.
#
# note that it's easy to change this scheme to the
# minimal/maximal value in the current display, and not
# the whole data set.
#
if self.ymin_control.is_auto():
ymin = round(min(self.data), 0) - 1
else:
ymin = int(self.ymin_control.manual_value())
if self.ymax_control.is_auto():
ymax = round(max(self.data), 0) + 1
else:
ymax = int(self.ymax_control.manual_value())
self.axes.set_xbound(lower=xmin, upper=xmax)
self.axes.set_ybound(lower=ymin, upper=ymax)
# anecdote: axes.grid assumes b=True if any other flag is
# given even if b is set to False.
# so just passing the flag into the first statement won't
# work.
#
if self.cb_grid.IsChecked():
self.axes.grid(True, color='gray')
else:
self.axes.grid(False)
# Using setp here is convenient, because get_xticklabels
# returns a list over which one needs to explicitly
# iterate, and setp already handles this.
#
pylab.setp(self.axes.get_xticklabels(),
visible=self.cb_xlab.IsChecked())
self.plot_data.set_xdata(np.arange(len(self.data)))
self.plot_data.set_ydata(np.array(self.data))
self.canvas.draw()
def on_pause_button(self, event):
self.paused = not self.paused
def on_update_pause_button(self, event):
label = "Resume" if self.paused else "Pause"
self.pause_button.SetLabel(label)
def on_cb_grid(self, event):
self.draw_plot()
def on_cb_xlab(self, event):
self.draw_plot()
def on_save_plot(self, event):
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
def on_redraw_timer(self, event):
# if paused do not add data, but still redraw the plot
# (to respond to scale modifications, grid change, etc.)
#
if not self.paused:
self.data.append(self.datagen.next())
self.draw_plot()
def on_exit(self, event):
self.Destroy()
def flash_status_message(self, msg, flash_len_ms=1500):
self.statusbar.SetStatusText(msg)
self.timeroff = wx.Timer(self)
self.Bind(
wx.EVT_TIMER,
self.on_flash_status_off,
self.timeroff)
self.timeroff.Start(flash_len_ms, oneShot=True)
def on_flash_status_off(self, event):
self.statusbar.SetStatusText('')
if __name__ == '__main__':
app = wx.PySimpleApp()
app.frame = GraphFrame()
app.frame.Show()
app.MainLoop()
| isc |
fredhusser/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
able to recognize natural languages by using the frequencies of short
character sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
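# One possible solution for the TASKs above (a sketch, not the only valid
# answer): a character n-gram TF-IDF vectorizer chained with a Perceptron.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)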
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
yashchandak/GNN | Sample_Run/DOPE/eval_performance.py | 1 | 5530 | from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from collections import Counter
import numpy as np
def patk(predictions, labels):
pak = np.zeros(3)
K = np.array([1, 3, 5])
for i in range(predictions.shape[0]):
pos = np.argsort(-predictions[i, :])
y = labels[i, :]
y = y[pos]
for j in range(3):
k = K[j]
pak[j] += (np.sum(y[:k]) / k)
pak = pak / predictions.shape[0]
return pak
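# Worked example (hypothetical numbers): for a single sample with
# predictions [0.9, 0.2, 0.7] and labels [1, 0, 1], the top-1 prediction is
# relevant, so P@1 = 1.0, while P@3 = 2/3 since only two of the top three
# ranked items are relevant.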
def cm_precision_recall(prediction, truth):
"""Evaluate confusion matrix, precision and recall for given set of labels and predictions
Args
prediction: a vector with predictions
truth: a vector with class labels
Returns:
cm: confusion matrix
precision: precision score
recall: recall score"""
confusion_matrix = Counter()
positives = [1]
binary_truth = [x in positives for x in truth]
binary_prediction = [x in positives for x in prediction]
for t, p in zip(binary_truth, binary_prediction):
confusion_matrix[t, p] += 1
cm = np.array([confusion_matrix[True, True], confusion_matrix[False, False], confusion_matrix[False, True],
confusion_matrix[True, False]])
# print cm
precision = (cm[0] / (cm[0] + cm[2] + 0.000001))
recall = (cm[0] / (cm[0] + cm[3] + 0.000001))
return cm, precision, recall
def bipartition_scores(labels, predictions):
""" Computes bipartitation metrics for a given multilabel predictions and labels
Args:
logits: Logits tensor, float - [batch_size, NUM_LABELS].
labels: Labels tensor, int32 - [batch_size, NUM_LABELS].
Returns:
bipartiation: an array with micro_precision, micro_recall, micro_f1,macro_precision, macro_recall, macro_f1"""
sum_cm = np.zeros((4))
macro_precision = 0
macro_recall = 0
for i in range(labels.shape[1]):
truth = labels[:, i]
prediction = predictions[:, i]
cm, precision, recall = cm_precision_recall(prediction, truth)
sum_cm += cm
macro_precision += precision
macro_recall += recall
macro_precision = macro_precision / labels.shape[1]
macro_recall = macro_recall / labels.shape[1]
# print(macro_recall, macro_precision)
macro_f1 = 2 * (macro_precision) * (macro_recall) / (macro_precision + macro_recall + 0.000001)
micro_precision = sum_cm[0] / (sum_cm[0] + sum_cm[2] + 0.000001)
micro_recall = sum_cm[0] / (sum_cm[0] + sum_cm[3] + 0.000001)
micro_f1 = 2 * (micro_precision) * (micro_recall) / (micro_precision + micro_recall + 0.000001)
bipartiation = np.asarray([micro_precision, micro_recall, micro_f1, macro_precision, macro_recall, macro_f1])
return bipartiation
def BAE(labels, predictions):
abs_error = (1 - predictions) * labels # consider error only for true classes
freq = np.sum(labels, axis=0) + 1e-15 # count the frequency of each label
num_labels = np.shape(labels)[1]
bae = np.sum(np.sum(abs_error, axis=0) / freq) / num_labels
# print(bae, np.sum(abs_error, axis=0), freq, num_labels)
return bae
def evaluate(predictions, labels, threshold=0, multi_label=False):
# `predictions` holds per-class scores (treated as probabilities below);
# `labels` is binarized (one-hot or multi-hot)
assert predictions.shape == labels.shape, "Shapes: %s, %s" % (predictions.shape, labels.shape,)
metrics = dict()
metrics['cross_entropy'] = -np.mean(labels * np.log(predictions + .0000000001))
if not multi_label:
metrics['bae'] = BAE(labels, predictions)
labels, predictions = np.argmax(labels, axis=1), np.argmax(predictions, axis=1)
metrics['accuracy'] = accuracy_score(labels, predictions)
metrics['micro_precision'], metrics['micro_recall'], metrics['micro_f1'], _ = \
precision_recall_fscore_support(labels, predictions, average='micro')
metrics['macro_precision'], metrics['macro_recall'], metrics['macro_f1'], metrics['coverage'], \
metrics['average_precision'], metrics['ranking_loss'], metrics['pak'], metrics['hamming_loss'] \
= 0, 0, 0, 0, 0, 0, 0, 0
else:
metrics['accuracy'] = accuracy_score(np.argmax(labels, axis=1), np.argmax(predictions, axis=1))
if threshold:
for i in range(predictions.shape[0]):
predictions[i, :][predictions[i, :] >= threshold] = 1
predictions[i, :][predictions[i, :] < threshold] = 0
else: # TOP K
for i in range(predictions.shape[0]):
k = np.sum(labels[i])
pos = predictions[i].argsort()
predictions[i].fill(0)
predictions[i][pos[-int(k):]] = 1
metrics['bae'] = 0
metrics['coverage'] = coverage_error(labels, predictions)
metrics['average_precision'] = label_ranking_average_precision_score(labels, predictions)
metrics['ranking_loss'] = label_ranking_loss(labels, predictions)
metrics['pak'] = patk(predictions, labels)
metrics['hamming_loss'] = hamming_loss(labels, predictions)
metrics['micro_precision'], metrics['micro_recall'], metrics['micro_f1'], metrics['macro_precision'], \
metrics['macro_recall'], metrics['macro_f1'] = bipartition_scores(labels, predictions)
return metrics
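# Minimal self-check (hypothetical usage, not part of the original module):
# random one-hot labels with row-normalized scores, single-label setting.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    scores = rng.rand(8, 4)
    scores = scores / scores.sum(axis=1, keepdims=True)  # pseudo-probabilities
    onehot = np.eye(4)[rng.randint(0, 4, size=8)]
    results = evaluate(scores, onehot, multi_label=False)
    print('accuracy: %.3f' % results['accuracy'])
    print('micro_f1: %.3f' % results['micro_f1'])
    print('bae: %.3f' % results['bae'])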
| mit |