repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
whn09/tensorflow | tensorflow/python/estimator/inputs/pandas_io.py | 86 | 4503 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
try:
  # pylint: disable=g-import-not-at-top
  # pylint: disable=unused-import
  import pandas as pd
  HAS_PANDAS = True
except IOError:
  # Pandas writes a temporary file during import. If it fails, don't use pandas.
  HAS_PANDAS = False
except ImportError:
  HAS_PANDAS = False


def pandas_input_fn(x,
                    y=None,
                    batch_size=128,
                    num_epochs=1,
                    shuffle=None,
                    queue_capacity=1000,
                    num_threads=1,
                    target_column='target'):
  """Returns input function that would feed Pandas DataFrame into the model.

  Note: `y`'s index must match `x`'s index.

  Args:
    x: pandas `DataFrame` object.
    y: pandas `Series` object. `None` if absent.
    batch_size: int, size of batches to return.
    num_epochs: int, number of epochs to iterate over data. If not `None`,
      read attempts that would exceed this value will raise `OutOfRangeError`.
    shuffle: bool, whether to read the records in random order.
    queue_capacity: int, size of the read queue. If `None`, it will be set
      roughly to the size of `x`.
    num_threads: Integer, number of threads used for reading and enqueueing. In
      order to have predicted and repeatable order of reading and enqueueing,
      such as in prediction and evaluation mode, `num_threads` should be 1.
    target_column: str, name to give the target column `y`.

  Returns:
    Function, that has signature of ()->(dict of `features`, `target`)

  Raises:
    ValueError: if `x` already contains a column with the same name as `y`, or
      if the indexes of `x` and `y` don't match.
    TypeError: `shuffle` is not bool.
  """
  if not HAS_PANDAS:
    raise TypeError(
        'pandas_input_fn should not be called without pandas installed')

  if not isinstance(shuffle, bool):
    raise TypeError('shuffle must be explicitly set as boolean; '
                    'got {}'.format(shuffle))

  x = x.copy()
  if y is not None:
    if target_column in x:
      raise ValueError(
          'Cannot use name %s for target column: DataFrame already has a '
          'column with that name: %s' % (target_column, x.columns))
    if not np.array_equal(x.index, y.index):
      raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
                       'Index for y: %s\n' % (x.index, y.index))
    x[target_column] = y

  # TODO(mdan): These are memory copies. We probably don't need 4x slack space.
  # The sizes below are consistent with what I've seen elsewhere.
  if queue_capacity is None:
    if shuffle:
      queue_capacity = 4 * len(x)
    else:
      queue_capacity = len(x)
  min_after_dequeue = max(queue_capacity / 4, 1)

  def input_fn():
    """Pandas input function."""
    queue = feeding_functions._enqueue_data(  # pylint: disable=protected-access
        x,
        queue_capacity,
        shuffle=shuffle,
        min_after_dequeue=min_after_dequeue,
        num_threads=num_threads,
        enqueue_size=batch_size,
        num_epochs=num_epochs)
    if num_epochs is None:
      features = queue.dequeue_many(batch_size)
    else:
      features = queue.dequeue_up_to(batch_size)
    assert len(features) == len(x.columns) + 1, ('Features should have one '
                                                 'extra element for the index.')
    features = features[1:]
    features = dict(zip(list(x.columns), features))
    if y is not None:
      target = features.pop(target_column)
      return features, target
    return features

  return input_fn
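

# --- Illustrative usage sketch (added for exposition; not part of the original
# TensorFlow module). The toy DataFrame/Series below and the use of
# tf.estimator.LinearRegressor / tf.feature_column are assumptions chosen only
# to show how the returned input_fn is typically consumed by an Estimator. ---
if __name__ == '__main__':
  import pandas  # imported locally so the sketch stays self-contained
  import tensorflow as tf

  # A tiny numeric feature column and a matching target Series.
  train_x = pandas.DataFrame({'sqft': [800.0, 1200.0, 1500.0, 2000.0]})
  train_y = pandas.Series([100.0, 180.0, 240.0, 310.0])

  # `shuffle` must be set explicitly (see the TypeError check above).
  train_input_fn = pandas_input_fn(
      x=train_x, y=train_y, batch_size=2, num_epochs=10, shuffle=True)

  # train() calls train_input_fn() to build the feature/target tensors.
  estimator = tf.estimator.LinearRegressor(
      feature_columns=[tf.feature_column.numeric_column('sqft')])
  estimator.train(input_fn=train_input_fn)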
| apache-2.0 |
tayebzaidi/HonorsThesisTZ | ThesisCode/gen_lightcurves/selectedToPDF_periodic.py | 1 | 4171 | #!/usr/bin/env python
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.gridspec as gridspec
import json
import os
import sys
import numpy as np
import math
def main():
destination_file = 'OGLE_subset_small.pdf'
source_directory = '../data/OGLE/parsed'
filename = 'selectedLightcurves_OGLE'
with open(filename, 'r') as f:
lightcurves = [line.rstrip('\n') for line in f]
with PdfPages(destination_file) as pdf:
for lightcurve in lightcurves:
lightcurve_path = os.path.join(source_directory,lightcurve)
with open(lightcurve_path, 'r') as f:
file_data = json.load(f)
#Ignore all non-CSP or CfA entries
# for k in list(file_data.keys()):
# if not (k.endswith('CSP') or ('CfA' in k)):
# del file_data[k]
# if len(file_data) == 0:
# continue
#This hack removes the '_gpsmoothed.json' from the string to return the objname
objname = lightcurve[:-16]
#Number of filters
N = len(file_data.keys())
print(N)
cols = 3
if N < 3:
cols = 1
rows = int(math.ceil(N / cols))
#To ensure that plot text fits without overlapping
#Change font size to fit the text, taken from \
#http://stackoverflow.com/questions/3899980/how-to-change-the-font-size-on-a-matplotlib-plot\
# answer by Pedro M. Duarte
SIZE = 5
MEDIUM_SIZE = 8
BIGGER_SIZE = 10
plt.rc('font', size=SIZE) # controls default text sizes
plt.rc('axes', titlesize=SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
if rows > 3:
small_size = 2
plt.rc('font', size=small_size) # controls default text sizes
plt.rc('axes', titlesize=small_size) # fontsize of the axes title
plt.rc('axes', labelsize=small_size) # fontsize of the x and y labels
plt.rc('xtick', labelsize=small_size) # fontsize of the tick labels
plt.rc('ytick', labelsize=small_size)
gs = gridspec.GridSpec(rows, cols)
fig = plt.figure(figsize=(6, 6))
fig.suptitle(objname)
#Return the list of keys from the file_data
data = list(file_data)
for i in range(len(data)):
filt = data[i]
mjd = file_data[filt]['mjd']
mag = file_data[filt]['mag']
mag_err = file_data[filt]['dmag']
model_phase = file_data[filt]['modeldate']
model_mag = file_data[filt]['modelmag']
#bspline_mag = file_data[filt]['bsplinemag']
goodstatus = file_data[filt]['goodstatus']
type = file_data[filt]['type']
ax = fig.add_subplot(gs[i])
ax.errorbar(mjd, mag, fmt='r', yerr=mag_err,label='Original', alpha=0.7, linestyle=None)
ymin, ymax = ax.get_ylim()
ax.plot(model_phase, model_mag, '-k', label='GP')
#ax.plot(model_phase, bspline_mag, '-b', label='BSpline')
ax.set_title(filt)
handles, labels = ax.get_legend_handles_labels()
if(not goodstatus):
ax.set_ylim(ymin, ymax)
ax.invert_yaxis()
fig.legend(handles, labels, title=type)
pdf.savefig() # saves the current figure into a pdf page
plt.close(fig)
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/core/categorical.py | 1 | 66886 | # pylint: disable=E1101,W0232
import numpy as np
from warnings import warn
import types
from pandas import compat, lib
from pandas.compat import u
from pandas.core.algorithms import factorize, take_1d
from pandas.core.base import (PandasObject, PandasDelegate,
NoNewAttributesMixin, _shared_docs)
import pandas.core.common as com
from pandas.core.missing import interpolate_2d
from pandas.compat.numpy import function as nv
from pandas.util.decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
from pandas.core.common import (
ABCSeries, ABCIndexClass, ABCCategoricalIndex, isnull, notnull,
is_dtype_equal, is_categorical_dtype, is_integer_dtype,
_possibly_infer_to_datetimelike, is_list_like,
is_sequence, is_null_slice, is_bool, _ensure_object, _ensure_int64,
_coerce_indexer_dtype)
from pandas.types.api import CategoricalDtype
from pandas.util.terminal import get_terminal_size
from pandas.core.config import get_option
def _cat_compare_op(op):
def f(self, other):
# On python2, you can usually compare any type to any type, and
# Categoricals can be seen as a custom type, but having different
# results depending whether categories are the same or not is kind of
# insane, so be a bit stricter here and use the python3 idea of
# comparing only things of equal type.
if not self.ordered:
if op in ['__lt__', '__gt__', '__le__', '__ge__']:
raise TypeError("Unordered Categoricals can only compare "
"equality or not")
if isinstance(other, Categorical):
# Two Categoricals can only be be compared if the categories are
# the same
if ((len(self.categories) != len(other.categories)) or
not ((self.categories == other.categories).all())):
raise TypeError("Categoricals can only be compared if "
"'categories' are the same")
if not (self.ordered == other.ordered):
raise TypeError("Categoricals can only be compared if "
"'ordered' is the same")
na_mask = (self._codes == -1) | (other._codes == -1)
f = getattr(self._codes, op)
ret = f(other._codes)
if na_mask.any():
# In other series, the leads to False, so do that here too
ret[na_mask] = False
return ret
# Numpy-1.9 and earlier may convert a scalar to a zerodim array during
# comparison operation when second arg has higher priority, e.g.
#
# cat[0] < cat
#
# With cat[0], for example, being ``np.int64(1)`` by the time it gets
# into this function would become ``np.array(1)``.
other = lib.item_from_zerodim(other)
if lib.isscalar(other):
if other in self.categories:
i = self.categories.get_loc(other)
return getattr(self._codes, op)(i)
else:
if op == '__eq__':
return np.repeat(False, len(self))
elif op == '__ne__':
return np.repeat(True, len(self))
else:
msg = ("Cannot compare a Categorical for op {op} with a "
"scalar, which is not a category.")
raise TypeError(msg.format(op=op))
else:
# allow categorical vs object dtype array comparisons for equality
# these are only positional comparisons
if op in ['__eq__', '__ne__']:
return getattr(np.array(self), op)(np.array(other))
msg = ("Cannot compare a Categorical for op {op} with type {typ}."
"\nIf you want to compare values, use 'np.asarray(cat) "
"<op> other'.")
raise TypeError(msg.format(op=op, typ=type(other)))
f.__name__ = op
return f
def maybe_to_categorical(array):
""" coerce to a categorical if a series is given """
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
return array
_codes_doc = """The category codes of this categorical.
Level codes are an array of integers which are the positions of the real
values in the categories array.
There is no setter; use the other categorical methods and the normal item
setter to change values in the categorical.
"""
_categories_doc = """The categories of this categorical.
Setting assigns new values to each category (effectively a rename of
each individual category).
The assigned value has to be a list-like object. All items must be unique and
the number of items in the new categories must be the same as the number of
items in the old categories.
Assigning to `categories` is an inplace operation!
Raises
------
ValueError
If the new categories do not validate as categories or if the number of new
categories does not equal the number of old categories
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
class Categorical(PandasObject):
"""
Represents a categorical variable in classic R / S-plus fashion
`Categoricals` can take on only a limited, and usually fixed, number
of possible values (`categories`). In contrast to statistical categorical
variables, a `Categorical` might have an order, but numerical operations
(additions, divisions, ...) are not possible.
All values of the `Categorical` are either in `categories` or `np.nan`.
Assigning values outside of `categories` will raise a `ValueError`. Order
is defined by the order of the `categories`, not lexical order of the
values.
Parameters
----------
values : list-like
The values of the categorical. If categories are given, values not in
categories will be replaced with NaN.
categories : Index-like (unique), optional
The unique categories for this categorical. If not given, the
categories are assumed to be the unique values of values.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered categorical.
If not given, the resulting categorical will not be ordered.
Attributes
----------
categories : Index
The categories of this categorical
codes : ndarray
The codes (integer positions, which point to the categories) of this
categorical, read only.
ordered : boolean
Whether or not this Categorical is ordered.
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
Examples
--------
>>> from pandas import Categorical
>>> Categorical([1, 2, 3, 1, 2, 3])
[1, 2, 3, 1, 2, 3]
Categories (3, int64): [1 < 2 < 3]
>>> Categorical(['a', 'b', 'c', 'a', 'b', 'c'])
[a, b, c, a, b, c]
Categories (3, object): [a < b < c]
>>> a = Categorical(['a','b','c','a','b','c'], ['c', 'b', 'a'],
ordered=True)
>>> a.min()
'c'
"""
dtype = CategoricalDtype()
"""The dtype (always "category")"""
"""Whether or not this Categorical is ordered.
Only ordered `Categoricals` can be sorted (according to the order
of the categories) and have a min and max value.
See also
--------
Categorical.sort
Categorical.order
Categorical.min
Categorical.max
"""
# For comparisons, so that numpy uses our implementation if the compare
# ops, which raise
__array_priority__ = 1000
_typ = 'categorical'
def __init__(self, values, categories=None, ordered=False, name=None,
fastpath=False, levels=None):
if fastpath:
# fast path
self._codes = _coerce_indexer_dtype(values, categories)
self._categories = self._validate_categories(
categories, fastpath=isinstance(categories, ABCIndexClass))
self._ordered = ordered
return
if name is not None:
msg = ("the 'name' keyword is removed, use 'name' with consumers "
"of the categorical instead (e.g. 'Series(cat, "
"name=\"something\")'")
warn(msg, UserWarning, stacklevel=2)
# TODO: Remove after deprecation period in 2017/ after 0.18
if levels is not None:
warn("Creating a 'Categorical' with 'levels' is deprecated, use "
"'categories' instead", FutureWarning, stacklevel=2)
if categories is None:
categories = levels
else:
raise ValueError("Cannot pass in both 'categories' and "
"(deprecated) 'levels', use only "
"'categories'", stacklevel=2)
# sanitize input
if is_categorical_dtype(values):
# we are either a Series or a CategoricalIndex
if isinstance(values, (ABCSeries, ABCCategoricalIndex)):
values = values._values
if ordered is None:
ordered = values.ordered
if categories is None:
categories = values.categories
values = values.__array__()
elif isinstance(values, (ABCIndexClass, ABCSeries)):
pass
else:
# on numpy < 1.6 datetimelike get inferred to all i8 by
# _sanitize_array which is fine, but since factorize does this
# correctly no need here this is an issue because _sanitize_array
# also coerces np.nan to a string under certain versions of numpy
# as well
values = _possibly_infer_to_datetimelike(values,
convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# On list with NaNs, int values will be converted to float. Use
# "object" dtype to prevent this. In the end objects will be
# casted to int/... in the category assignment step.
dtype = 'object' if isnull(values).any() else None
values = _sanitize_array(values, None, dtype=dtype)
if categories is None:
try:
codes, categories = factorize(values, sort=True)
except TypeError:
codes, categories = factorize(values, sort=False)
if ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
categories = self._validate_categories(categories)
else:
# there were two ways if categories are present
# - the old one, where each value is an int pointer to the levels
# array -> not anymore possible, but code outside of pandas could
# call us like that, so make some checks
# - the new one, where each value is also in the categories array
# (or np.nan)
# make sure that we always have the same type here, no matter what
# we get passed in
categories = self._validate_categories(categories)
codes = _get_codes_for_values(values, categories)
# TODO: check for old style usage. These warnings should be removed
# after 0.18/ in 2016
if is_integer_dtype(values) and not is_integer_dtype(categories):
warn("Values and categories have different dtypes. Did you "
"mean to use\n'Categorical.from_codes(codes, "
"categories)'?", RuntimeWarning, stacklevel=2)
if (len(values) and is_integer_dtype(values) and
(codes == -1).all()):
warn("None of the categories were found in values. Did you "
"mean to use\n'Categorical.from_codes(codes, "
"categories)'?", RuntimeWarning, stacklevel=2)
self.set_ordered(ordered or False, inplace=True)
self._categories = categories
self._codes = _coerce_indexer_dtype(codes, categories)
def copy(self):
""" Copy constructor. """
return Categorical(values=self._codes.copy(),
categories=self.categories, ordered=self.ordered,
fastpath=True)
def astype(self, dtype):
""" coerce this type to another dtype """
if is_categorical_dtype(dtype):
return self
return np.array(self, dtype=dtype)
@cache_readonly
def ndim(self):
"""Number of dimensions of the Categorical """
return self._codes.ndim
@cache_readonly
def size(self):
""" return the len of myself """
return len(self)
@cache_readonly
def itemsize(self):
""" return the size of a single category """
return self.categories.itemsize
def reshape(self, new_shape, *args, **kwargs):
"""
An ndarray-compatible method that returns
`self` because categorical instances cannot
actually be reshaped.
"""
nv.validate_reshape(args, kwargs)
return self
@property
def base(self):
""" compat, we are always our own object """
return None
@classmethod
def from_array(cls, data, **kwargs):
"""
Make a Categorical type from a single array-like object.
For internal compatibility with numpy arrays.
Parameters
----------
data : array-like
Can be an Index or array-like. The categories are assumed to be
the unique values of `data`.
"""
return Categorical(data, **kwargs)
@classmethod
def from_codes(cls, codes, categories, ordered=False, name=None):
"""
Make a Categorical type from codes and categories arrays.
This constructor is useful if you already have codes and categories and
so do not need the (computation intensive) factorization step, which is
usually done on the constructor.
If your data does not follow this convention, please use the normal
constructor.
Parameters
----------
codes : array-like, integers
An integer array, where each integer points to a category in
categories or -1 for NaN
categories : index-like
The categories for the categorical. Items need to be unique.
ordered : boolean, (default False)
Whether or not this categorical is treated as an ordered
categorical. If not given, the resulting categorical will be
unordered.
"""
if name is not None:
msg = ("the 'name' keyword is removed, use 'name' with consumers "
"of the categorical instead (e.g. 'Series(cat, "
"name=\"something\")'")
warn(msg, UserWarning, stacklevel=2)
try:
codes = np.asarray(codes, np.int64)
except:
raise ValueError(
"codes need to be convertible to an arrays of integers")
categories = cls._validate_categories(categories)
if len(codes) and (codes.max() >= len(categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return Categorical(codes, categories=categories, ordered=ordered,
fastpath=True)
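# Illustrative doctest-style example (added for exposition, not part of the
# original pandas source; the literal codes/categories are assumptions).
# Constructing directly from integer codes skips factorization, and -1 maps
# to NaN:
#
# >>> Categorical.from_codes([0, 1, 2, 0, -1], categories=['a', 'b', 'c'])
# [a, b, c, a, NaN]
# Categories (3, object): [a, b, c]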
_codes = None
def _get_codes(self):
""" Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v
def _set_codes(self, codes):
"""
Not settable by the user directly
"""
raise ValueError("cannot set Categorical codes directly")
codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc)
def _get_labels(self):
"""
Get the category labels (deprecated).
Deprecated, use .codes!
"""
warn("'labels' is deprecated. Use 'codes' instead", FutureWarning,
stacklevel=2)
return self.codes
labels = property(fget=_get_labels, fset=_set_codes)
_categories = None
@classmethod
def _validate_categories(cls, categories, fastpath=False):
"""
Validates that we have good categories
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
"""
if not isinstance(categories, ABCIndexClass):
dtype = None
if not hasattr(categories, "dtype"):
categories = _convert_to_list_like(categories)
# On categories with NaNs, int values would be converted to
# float. Use "object" dtype to prevent this.
if isnull(categories).any():
without_na = np.array([x for x in categories
if notnull(x)])
with_na = np.array(categories)
if with_na.dtype != without_na.dtype:
dtype = "object"
from pandas import Index
categories = Index(categories, dtype=dtype)
if not fastpath:
# check properties of the categories
# we don't allow NaNs in the categories themselves
if categories.hasnans:
# NaNs in cats deprecated in 0.17,
# remove in 0.18 or 0.19 GH 10748
msg = ('\nSetting NaNs in `categories` is deprecated and '
'will be removed in a future version of pandas.')
warn(msg, FutureWarning, stacklevel=3)
# categories must be unique
if not categories.is_unique:
raise ValueError('Categorical categories must be unique')
return categories
def _set_categories(self, categories, fastpath=False):
""" Sets new categories
Parameters
----------
fastpath : boolean (default: False)
Don't perform validation of the categories for uniqueness or nulls
"""
categories = self._validate_categories(categories, fastpath=fastpath)
if (not fastpath and self._categories is not None and
len(categories) != len(self._categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._categories = categories
def _get_categories(self):
""" Gets the categories """
# categories is an Index, which is immutable -> no need to copy
return self._categories
categories = property(fget=_get_categories, fset=_set_categories,
doc=_categories_doc)
def _set_levels(self, levels):
""" set new levels (deprecated, use "categories") """
warn("Assigning to 'levels' is deprecated, use 'categories'",
FutureWarning, stacklevel=2)
self.categories = levels
def _get_levels(self):
""" Gets the levels (deprecated, use "categories") """
warn("Accessing 'levels' is deprecated, use 'categories'",
FutureWarning, stacklevel=2)
return self.categories
# TODO: Remove after deprecation period in 2017/ after 0.18
levels = property(fget=_get_levels, fset=_set_levels)
_ordered = None
def _set_ordered(self, value):
""" Sets the ordered attribute to the boolean value """
warn("Setting 'ordered' directly is deprecated, use 'set_ordered'",
FutureWarning, stacklevel=2)
self.set_ordered(value, inplace=True)
def set_ordered(self, value, inplace=False):
"""
Sets the ordered attribute to the boolean value
Parameters
----------
value : boolean to set whether this categorical is ordered (True) or
not (False)
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to the value
"""
if not is_bool(value):
raise TypeError("ordered must be a boolean value")
cat = self if inplace else self.copy()
cat._ordered = value
if not inplace:
return cat
def as_ordered(self, inplace=False):
"""
Sets the Categorical to be ordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to True
"""
return self.set_ordered(True, inplace=inplace)
def as_unordered(self, inplace=False):
"""
Sets the Categorical to be unordered
Parameters
----------
inplace : boolean (default: False)
Whether or not to set the ordered attribute inplace or return a copy
of this categorical with ordered set to False
"""
return self.set_ordered(False, inplace=inplace)
def _get_ordered(self):
""" Gets the ordered attribute """
return self._ordered
ordered = property(fget=_get_ordered, fset=_set_ordered)
def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
""" Sets the categories to the specified new_categories.
`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(less or more items than in old categories will result in values set to
NaN or in unused categories respectively).
This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.
On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder), which
can result in surprising changes, for example when using special string
dtypes on python3, which do not consider a S1 string equal to a
single char python string.
Raises
------
ValueError
If new_categories does not validate as categories
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, (default: False)
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
rename : boolean (default: False)
Whether or not the new_categories should be considered as a rename
of the old categories or as reordered categories.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
"""
new_categories = self._validate_categories(new_categories)
cat = self if inplace else self.copy()
if rename:
if (cat._categories is not None and
len(new_categories) < len(cat._categories)):
# remove all _codes which are larger and set to -1/NaN
self._codes[self._codes >= len(new_categories)] = -1
else:
values = cat.__array__()
cat._codes = _get_codes_for_values(values, new_categories)
cat._categories = new_categories
if ordered is None:
ordered = self.ordered
cat.set_ordered(ordered, inplace=True)
if not inplace:
return cat
def rename_categories(self, new_categories, inplace=False):
""" Renames categories.
The new categories have to be a list-like object. All items must be
unique and the number of items in the new categories must be the same
as the number of items in the old categories.
Raises
------
ValueError
If the new categories do not have the same number of items as the
current categories or do not validate as categories
Parameters
----------
new_categories : Index-like
The renamed categories.
inplace : boolean (default: False)
Whether or not to rename the categories inplace or return a copy of
this categorical with renamed categories.
Returns
-------
cat : Categorical with renamed categories added or None if inplace.
See also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
cat = self if inplace else self.copy()
cat.categories = new_categories
if not inplace:
return cat
def reorder_categories(self, new_categories, ordered=None, inplace=False):
""" Reorders categories as specified in new_categories.
`new_categories` need to include all old categories and no new category
items.
Raises
------
ValueError
If the new categories do not contain all old category items or any
new ones
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : boolean, optional
Whether or not the categorical is treated as an ordered categorical.
If not given, do not change the ordered information.
inplace : boolean (default: False)
Whether or not to reorder the categories inplace or return a copy of
this categorical with reordered categories.
Returns
-------
cat : Categorical with reordered categories or None if inplace.
See also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
"""
if set(self._categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace)
def add_categories(self, new_categories, inplace=False):
""" Add new categories.
`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.
Raises
------
ValueError
If the new categories include old categories or do not validate as
categories
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : boolean (default: False)
Whether or not to add the categories inplace or return a copy of
this categorical with added categories.
Returns
-------
cat : Categorical with new categories added or None if inplace.
See also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
"""
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self._categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: %s" %
str(already_included))
raise ValueError(msg)
new_categories = list(self._categories) + list(new_categories)
cat = self if inplace else self.copy()
cat._categories = self._validate_categories(new_categories)
cat._codes = _coerce_indexer_dtype(cat._codes, new_categories)
if not inplace:
return cat
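# Illustrative doctest-style example (added for exposition, not part of the
# original pandas source; the literal values are assumptions). New categories
# are appended and remain unused until assigned:
#
# >>> c = Categorical(['a', 'b', 'a'])
# >>> c.add_categories(['c'])
# [a, b, a]
# Categories (3, object): [a, b, c]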
def remove_categories(self, removals, inplace=False):
""" Removes the specified categories.
`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN
Raises
------
ValueError
If the removals are not contained in the categories
Parameters
----------
removals : category or list of categories
The categories which should be removed.
inplace : boolean (default: False)
Whether or not to remove the categories inplace or return a copy of
this categorical with removed categories.
Returns
-------
cat : Categorical with removed categories or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
"""
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self._categories)
new_categories = [c for c in self._categories if c not in removal_set]
# GH 10156
if any(isnull(removals)):
not_included = [x for x in not_included if notnull(x)]
new_categories = [x for x in new_categories if notnull(x)]
if len(not_included) != 0:
raise ValueError("removals must all be in old categories: %s" %
str(not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace)
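# Illustrative doctest-style example (added for exposition, not part of the
# original pandas source; the literal values are assumptions). Values whose
# category is removed become NaN:
#
# >>> Categorical(['a', 'b', 'c']).remove_categories(['b'])
# [a, NaN, c]
# Categories (2, object): [a, c]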
def remove_unused_categories(self, inplace=False):
""" Removes categories which are not used.
Parameters
----------
inplace : boolean (default: False)
Whether or not to drop unused categories inplace or return a copy of
this categorical with unused categories dropped.
Returns
-------
cat : Categorical with unused categories dropped or None if inplace.
See also
--------
rename_categories
reorder_categories
add_categories
remove_categories
set_categories
"""
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
cat._codes = inv
cat._categories = cat.categories.take(idx)
if not inplace:
return cat
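# Illustrative doctest-style example (added for exposition, not part of the
# original pandas source; the literal values are assumptions):
#
# >>> c = Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c', 'd'])
# >>> c.remove_unused_categories()
# [a, b, a]
# Categories (2, object): [a, b]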
def map(self, mapper):
"""
Apply mapper function to its categories (not codes).
Parameters
----------
mapper : callable
Function to be applied. When all categories are mapped
to different categories, the result will be Categorical which has
the same order property as the original. Otherwise, the result will
be np.ndarray.
Returns
-------
applied : Categorical or np.ndarray.
"""
new_categories = self.categories.map(mapper)
try:
return Categorical.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
return np.take(new_categories, self._codes)
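# Illustrative doctest-style example (added for exposition, not part of the
# original pandas source; the mapper and values are assumptions). Mapping to
# distinct categories keeps the result a Categorical:
#
# >>> Categorical(['a', 'b', 'c']).map(lambda x: x.upper())
# [A, B, C]
# Categories (3, object): [A, B, C]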
__eq__ = _cat_compare_op('__eq__')
__ne__ = _cat_compare_op('__ne__')
__lt__ = _cat_compare_op('__lt__')
__gt__ = _cat_compare_op('__gt__')
__le__ = _cat_compare_op('__le__')
__ge__ = _cat_compare_op('__ge__')
# for Series/ndarray like compat
@property
def shape(self):
""" Shape of the Categorical.
For internal compatibility with numpy arrays.
Returns
-------
shape : tuple
"""
return tuple([len(self._codes)])
def shift(self, periods):
"""
Shift Categorical by desired number of periods.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : Categorical
"""
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, com._ensure_platform_int(periods), axis=0)
if periods > 0:
codes[:periods] = -1
else:
codes[periods:] = -1
return Categorical.from_codes(codes, categories=self.categories,
ordered=self.ordered)
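# Illustrative doctest-style example (added for exposition, not part of the
# original pandas source; the literal values are assumptions). Vacated slots
# become NaN (code -1):
#
# >>> Categorical(['a', 'b', 'c']).shift(1)
# [NaN, a, b]
# Categories (3, object): [a, b, c]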
def __array__(self, dtype=None):
"""
The numpy array interface.
Returns
-------
values : numpy array
A numpy array of either the specified dtype or,
if dtype==None (default), the same dtype as
categorical.categories.dtype
"""
ret = take_1d(self.categories.values, self._codes)
if dtype and not is_dtype_equal(dtype, self.categories.dtype):
return np.asarray(ret, dtype)
return ret
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if not isinstance(state, dict):
raise Exception('invalid pickle state')
# Provide compatibility with pre-0.15.0 Categoricals.
if '_codes' not in state and 'labels' in state:
state['_codes'] = state.pop('labels')
if '_categories' not in state and '_levels' in state:
state['_categories'] = self._validate_categories(state.pop(
'_levels'))
# 0.16.0 ordered change
if '_ordered' not in state:
# >=15.0 < 0.16.0
if 'ordered' in state:
state['_ordered'] = state.pop('ordered')
else:
state['_ordered'] = False
for k, v in compat.iteritems(state):
setattr(self, k, v)
@property
def T(self):
return self
@property
def nbytes(self):
return self._codes.nbytes + self._categories.values.nbytes
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self._codes.nbytes + self._categories.memory_usage(deep=deep)
@Substitution(klass='Categorical', value='v')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, v, side='left', sorter=None):
if not self.ordered:
raise ValueError("Categorical not ordered\nyou can use "
".as_ordered() to change the Categorical to an "
"ordered one")
from pandas.core.series import Series
values_as_codes = self.categories.values.searchsorted(
Series(v).values, side=side)
return self.codes.searchsorted(values_as_codes, sorter=sorter)
def isnull(self):
"""
Detect missing values
Both missing values (-1 in .codes) and NA as a category are detected.
Returns
-------
a boolean array of whether my values are null
See also
--------
pandas.isnull : pandas version
Categorical.notnull : boolean inverse of Categorical.isnull
"""
ret = self._codes == -1
# String/object and float categories can hold np.nan
if self.categories.dtype.kind in ['S', 'O', 'f']:
if np.nan in self.categories:
nan_pos = np.where(isnull(self.categories))[0]
# we only have one NA in categories
ret = np.logical_or(ret, self._codes == nan_pos)
return ret
def notnull(self):
"""
Reverse of isnull
Both missing values (-1 in .codes) and NA as a category are detected as
null.
Returns
-------
a boolean array of whether my values are not null
See also
--------
pandas.notnull : pandas version
Categorical.isnull : boolean inverse of Categorical.notnull
"""
return ~self.isnull()
def put(self, *args, **kwargs):
"""
Replace specific elements in the Categorical with given values.
"""
raise NotImplementedError(("'put' is not yet implemented "
"for Categorical"))
def dropna(self):
"""
Return the Categorical without null values.
Both missing values (-1 in .codes) and NA as a category are detected.
NA is removed from the categories if present.
Returns
-------
valid : Categorical
"""
result = self[self.notnull()]
if isnull(result.categories).any():
result = result.remove_categories([np.nan])
return result
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN, even if NaN is a category.
Returns
-------
counts : Series
"""
from numpy import bincount
from pandas.core.common import isnull
from pandas.core.series import Series
from pandas.core.index import CategoricalIndex
obj = (self.remove_categories([np.nan]) if dropna and
isnull(self.categories).any() else self)
code, cat = obj._codes, obj.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = Categorical(ix, categories=cat, ordered=obj.ordered,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64')
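# Illustrative doctest-style example (added for exposition, not part of the
# original pandas source; the literal values are assumptions). Every category
# gets an entry, even with a count of zero:
#
# >>> Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c']).value_counts()
# a    2
# b    1
# c    0
# dtype: int64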
def get_values(self):
""" Return the values.
For internal compatibility with pandas formatting.
Returns
-------
values : numpy array
A numpy array of the same dtype as categorical.categories.dtype or
Index if datetime / periods
"""
# if we are a datetime and period index, return Index to keep metadata
if com.is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
return np.array(self)
def check_for_ordered(self, op):
""" assert that we are ordered """
if not self.ordered:
raise TypeError("Categorical is not ordered for operation {op}\n"
"you can use .as_ordered() to change the "
"Categorical to an ordered one\n".format(op=op))
def argsort(self, ascending=True, *args, **kwargs):
"""
Returns the indices that would sort the Categorical instance if
'sort_values' was called. This function is implemented to provide
compatibility with numpy ndarray objects.
While an ordering is applied to the category values, arg-sorting
in this context refers more to organizing and grouping together
based on matching category values. Thus, this function can be
called on an unordered Categorical instance unlike the functions
'Categorical.min' and 'Categorical.max'.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
"""
ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
result = np.argsort(self._codes.copy(), **kwargs)
if not ascending:
result = result[::-1]
return result
def sort_values(self, inplace=False, ascending=True, na_position='last'):
""" Sorts the Categorical by category value returning a new
Categorical by default.
While an ordering is applied to the category values, sorting in this
context refers more to organizing and grouping together based on
matching category values. Thus, this function can be called on an
unordered Categorical instance unlike the functions 'Categorical.min'
and 'Categorical.max'.
Parameters
----------
inplace : boolean, default False
Do operation in place.
ascending : boolean, default True
Order ascending. Passing False orders descending. The
ordering parameter provides the method by which the
category values are organized.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Returns
-------
y : Categorical or None
See Also
--------
Categorical.sort
Examples
--------
>>> c = pd.Categorical([1, 2, 2, 1, 5])
>>> c
[1, 2, 2, 1, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values()
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>> c.sort_values(ascending=False)
[5, 2, 2, 1, 1]
Categories (3, int64): [1, 2, 5]
Inplace sorting can be done as well:
>>> c.sort_values(inplace=True)
>>> c
[1, 1, 2, 2, 5]
Categories (3, int64): [1, 2, 5]
>>>
>>> c = pd.Categorical([1, 2, 2, 1, 5])
'sort_values' behaviour with NaNs. Note that 'na_position'
is independent of the 'ascending' parameter:
>>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])
>>> c
[NaN, 2.0, 2.0, NaN, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values()
[2.0, 2.0, 5.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False)
[5.0, 2.0, 2.0, NaN, NaN]
Categories (2, int64): [2, 5]
>>> c.sort_values(na_position='first')
[NaN, NaN, 2.0, 2.0, 5.0]
Categories (2, int64): [2, 5]
>>> c.sort_values(ascending=False, na_position='first')
[NaN, NaN, 5.0, 2.0, 2.0]
Categories (2, int64): [2, 5]
"""
if na_position not in ['last', 'first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
codes = np.sort(self._codes)
if not ascending:
codes = codes[::-1]
# NaN handling
na_mask = (codes == -1)
if na_mask.any():
n_nans = len(codes[na_mask])
if na_position == "first":
# in this case sort to the front
new_codes = codes.copy()
new_codes[0:n_nans] = -1
new_codes[n_nans:] = codes[~na_mask]
codes = new_codes
elif na_position == "last":
# ... and to the end
new_codes = codes.copy()
pos = len(codes) - n_nans
new_codes[0:pos] = codes[~na_mask]
new_codes[pos:] = -1
codes = new_codes
if inplace:
self._codes = codes
return
else:
return Categorical(values=codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
def order(self, inplace=False, ascending=True, na_position='last'):
"""
DEPRECATED: use :meth:`Categorical.sort_values`. That function
is entirely equivalent to this one.
See Also
--------
Categorical.sort_values
"""
warn("order is deprecated, use sort_values(...)", FutureWarning,
stacklevel=2)
return self.sort_values(inplace=inplace, ascending=ascending,
na_position=na_position)
def sort(self, inplace=True, ascending=True, na_position='last', **kwargs):
"""
DEPRECATED: use :meth:`Categorical.sort_values`. That function
is just like this one, except that a new Categorical is returned
by default, so make sure to pass in 'inplace=True' to get
inplace sorting.
See Also
--------
Categorical.sort_values
"""
warn("sort is deprecated, use sort_values(...)", FutureWarning,
stacklevel=2)
nv.validate_sort(tuple(), kwargs)
return self.sort_values(inplace=inplace, ascending=ascending,
na_position=na_position)
def ravel(self, order='C'):
""" Return a flattened (numpy) array.
For internal compatibility with numpy arrays.
Returns
-------
raveled : numpy array
"""
return np.array(self)
def view(self):
"""Return a view of myself.
For internal compatibility with numpy arrays.
Returns
-------
view : Categorical
Returns `self`!
"""
return self
def to_dense(self):
"""Return my 'dense' representation
For internal compatibility with numpy arrays.
Returns
-------
dense : array
"""
return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value')
def fillna(self, value=None, method=None, limit=None):
""" Fill NA/NaN values using the specified method.
Parameters
----------
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
value : scalar
Value to use to fill holes (e.g. 0)
limit : int, default None
(Not implemented yet for Categorical!)
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled.
Returns
-------
filled : Categorical with NA/NaN filled
"""
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
values = self._codes
# Make sure that we also get NA in categories
if self.categories.dtype.kind in ['S', 'O', 'f']:
if np.nan in self.categories:
values = values.copy()
nan_pos = np.where(isnull(self.categories))[0]
# we only have one NA in categories
values[values == nan_pos] = -1
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
values = _get_codes_for_values(values, self.categories)
else:
if not isnull(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = values == -1
if mask.any():
values = values.copy()
values[mask] = self.categories.get_loc(value)
return Categorical(values, categories=self.categories,
ordered=self.ordered, fastpath=True)
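# Illustrative doctest-style example (added for exposition, not part of the
# original pandas source; the literal values are assumptions). The fill value
# must already be one of the categories:
#
# >>> c = Categorical(['a', np.nan, 'b'], categories=['a', 'b'])
# >>> c.fillna('a')
# [a, a, b]
# Categories (2, object): [a, b]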
def take_nd(self, indexer, allow_fill=True, fill_value=None):
""" Take the codes by the indexer, fill with the fill_value.
For internal compatibility with numpy arrays.
"""
# filling must always be None/nan here
# but is passed thru internally
assert isnull(fill_value)
codes = take_1d(self._codes, indexer, allow_fill=True, fill_value=-1)
result = Categorical(codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
return result
take = take_nd
def _slice(self, slicer):
""" Return a slice of myself.
For internal compatibility with numpy arrays.
"""
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
_codes = self._codes[slicer]
return Categorical(values=_codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
def __len__(self):
"""The length of this Categorical."""
return len(self._codes)
def __iter__(self):
"""Returns an Iterator over the values of this Categorical."""
return iter(self.get_values())
def _tidy_repr(self, max_vals=10, footer=True):
""" a short repr displaying only max_vals and an optional (but default
footer)
"""
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = '%s, ..., %s' % (head[:-1], tail[1:])
if footer:
result = '%s\n%s' % (result, self._repr_footer())
return compat.text_type(result)
def _repr_categories(self):
""" return the base repr for the categories """
max_categories = (10 if get_option("display.max_categories") == 0 else
get_option("display.max_categories"))
from pandas.formats import format as fmt
if len(self.categories) > max_categories:
num = max_categories // 2
head = fmt.format_array(self.categories[:num], None)
tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
else:
category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
return category_strs
def _repr_categories_info(self):
""" Returns a string representation of the footer."""
category_strs = self._repr_categories()
dtype = getattr(self.categories, 'dtype_str',
str(self.categories.dtype))
levheader = "Categories (%d, %s): " % (len(self.categories), dtype)
width, height = get_terminal_size()
max_width = get_option("display.width") or width
if com.in_ipython_frontend():
# 0 = no breaks
max_width = 0
levstring = ""
start = True
cur_col_len = len(levheader) # header
sep_len, sep = (3, " < ") if self.ordered else (2, ", ")
linesep = sep.rstrip() + "\n" # remove whitespace
for val in category_strs:
if max_width != 0 and cur_col_len + sep_len + len(val) > max_width:
levstring += linesep + (" " * (len(levheader) + 1))
cur_col_len = len(levheader) + 1 # header + a whitespace
elif not start:
levstring += sep
cur_col_len += len(val)
levstring += val
start = False
# collapse " < ... < " to " ... " to save space
return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
def _repr_footer(self):
return u('Length: %d\n%s') % (len(self), self._repr_categories_info())
def _get_repr(self, length=True, na_rep='NaN', footer=True):
from pandas.formats import format as fmt
formatter = fmt.CategoricalFormatter(self, length=length,
na_rep=na_rep, footer=footer)
result = formatter.to_string()
return compat.text_type(result)
def __unicode__(self):
""" Unicode representation. """
_maxlen = 10
if len(self._codes) > _maxlen:
result = self._tidy_repr(_maxlen)
elif len(self._codes) > 0:
result = self._get_repr(length=len(self) > _maxlen)
else:
result = ('[], %s' %
self._get_repr(length=False,
footer=True, ).replace("\n", ", "))
return result
def _maybe_coerce_indexer(self, indexer):
""" return an indexer coerced to the codes dtype """
if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i':
indexer = indexer.astype(self._codes.dtype)
return indexer
def __getitem__(self, key):
""" Return an item. """
if isinstance(key, (int, np.integer)):
i = self._codes[key]
if i == -1:
return np.nan
else:
return self.categories[i]
else:
return Categorical(values=self._codes[key],
categories=self.categories,
ordered=self.ordered,
fastpath=True)
def __setitem__(self, key, value):
""" Item assignment.
Raises
------
ValueError
If (one or more) value is not in categories or if an assigned
`Categorical` does not have the same categories
"""
# require identical categories set
if isinstance(value, Categorical):
if not value.categories.equals(self.categories):
raise ValueError("Cannot set a Categorical with another, "
"without identical categories")
rvalue = value if is_list_like(value) else [value]
from pandas import Index
to_add = Index(rvalue).difference(self.categories)
# no assignments of values not in categories, but it's always ok to set
# something to np.nan
if len(to_add) and not isnull(to_add).all():
raise ValueError("Cannot setitem on a Categorical with a new "
"category, set the categories first")
# set by position
if isinstance(key, (int, np.integer)):
pass
# tuple of indexers (dataframe)
elif isinstance(key, tuple):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None),....)
if len(key) == 2:
if not is_null_slice(key[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
key = key[1]
elif len(key) == 1:
key = key[0]
else:
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
# slicing in Series or Categorical
elif isinstance(key, slice):
pass
# Array of True/False in Series or Categorical
else:
# There is a bug in numpy, which does not accept a Series as a
# indexer
# https://github.com/pydata/pandas/issues/6168
# https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9
# FIXME: remove when numpy 1.9 is the lowest numpy version pandas
# accepts...
key = np.asarray(key)
lindexer = self.categories.get_indexer(rvalue)
# FIXME: the following can be removed after GH7820 is fixed:
# https://github.com/pydata/pandas/issues/7820
# float categories do currently return -1 for np.nan, even if np.nan is
# included in the index -> "repair" this here
if isnull(rvalue).any() and isnull(self.categories).any():
nan_pos = np.where(isnull(self.categories))[0]
lindexer[lindexer == -1] = nan_pos
lindexer = self._maybe_coerce_indexer(lindexer)
self._codes[key] = lindexer
# reduction ops #
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation """
func = getattr(self, name, None)
if func is None:
raise TypeError("Categorical cannot perform the operation "
"{op}".format(op=name))
return func(numeric_only=numeric_only, **kwds)
def min(self, numeric_only=None, **kwargs):
""" The minimum value of the object.
Only ordered `Categoricals` have a minimum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
min : the minimum of this `Categorical`
"""
self.check_for_ordered('min')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].min(**kwargs)
else:
pointer = self._codes.min(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
def max(self, numeric_only=None, **kwargs):
""" The maximum value of the object.
Only ordered `Categoricals` have a maximum!
Raises
------
TypeError
If the `Categorical` is not `ordered`.
Returns
-------
max : the maximum of this `Categorical`
"""
self.check_for_ordered('max')
if numeric_only:
good = self._codes != -1
pointer = self._codes[good].max(**kwargs)
else:
pointer = self._codes.max(**kwargs)
if pointer == -1:
return np.nan
else:
return self.categories[pointer]
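# Illustrative doctest-style example (added for exposition, not part of the
# original pandas source; the literal values are assumptions). min/max require
# an ordered Categorical:
#
# >>> c = Categorical(['b', 'a', 'c'], categories=['a', 'b', 'c'], ordered=True)
# >>> c.min(), c.max()
# ('a', 'c')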
def mode(self):
"""
Returns the mode(s) of the Categorical.
Empty if nothing occurs at least 2 times. Always returns `Categorical`
even if only one value.
Returns
-------
modes : `Categorical` (sorted)
"""
import pandas.hashtable as htable
good = self._codes != -1
values = sorted(htable.mode_int64(_ensure_int64(self._codes[good])))
result = Categorical(values=values, categories=self.categories,
ordered=self.ordered, fastpath=True)
return result
def unique(self):
"""
Return the ``Categorical`` whose ``categories`` and ``codes`` are
unique. Unused categories are NOT returned.
- unordered category: values and categories are sorted by appearance
order.
- ordered category: values are sorted by appearance order, categories
keep their existing order.
Returns
-------
unique values : ``Categorical``
"""
from pandas.core.nanops import unique1d
# unlike np.unique, unique1d does not sort
unique_codes = unique1d(self.codes)
cat = self.copy()
# keep nan in codes
cat._codes = unique_codes
# exclude nan from indexer for categories
take_codes = unique_codes[unique_codes != -1]
if self.ordered:
take_codes = sorted(take_codes)
return cat.set_categories(cat.categories.take(take_codes))
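# Illustrative doctest-style example (added for exposition, not part of the
# original pandas source; the literal values are assumptions). For an
# unordered Categorical, categories follow appearance order and unused ones
# are dropped:
#
# >>> Categorical(['b', 'a', 'b'], categories=['a', 'b', 'c']).unique()
# [b, a]
# Categories (2, object): [b, a]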
def equals(self, other):
"""
Returns True if categorical arrays are equal.
Parameters
----------
other : `Categorical`
Returns
-------
are_equal : boolean
"""
return (self.is_dtype_equal(other) and
np.array_equal(self._codes, other._codes))
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return (self.categories.equals(other.categories) and
self.ordered == other.ordered)
except (AttributeError, TypeError):
return False
def describe(self):
""" Describes this Categorical
Returns
-------
description: `DataFrame`
A dataframe with frequency and counts by category.
"""
counts = self.value_counts(dropna=False)
freqs = counts / float(counts.sum())
from pandas.tools.merge import concat
result = concat([counts, freqs], axis=1)
result.columns = ['counts', 'freqs']
result.index.name = 'categories'
return result
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of a Categorical.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
codes = self._codes.repeat(repeats)
return Categorical(values=codes, categories=self.categories,
ordered=self.ordered, fastpath=True)
# The Series.cat accessor
class CategoricalAccessor(PandasDelegate, NoNewAttributesMixin):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation, while all
methods return new categorical data per default (but can be called with
`inplace=True`).
Examples
--------
>>> s.cat.categories
>>> s.cat.categories = list('abc')
>>> s.cat.rename_categories(list('cab'))
>>> s.cat.reorder_categories(list('cab'))
>>> s.cat.add_categories(['d','e'])
>>> s.cat.remove_categories(['d'])
>>> s.cat.remove_unused_categories()
>>> s.cat.set_categories(list('abcde'))
>>> s.cat.as_ordered()
>>> s.cat.as_unordered()
"""
def __init__(self, values, index):
self.categorical = values
self.index = index
self._freeze()
def _delegate_property_get(self, name):
return getattr(self.categorical, name)
def _delegate_property_set(self, name, new_values):
return setattr(self.categorical, name, new_values)
@property
def codes(self):
from pandas import Series
return Series(self.categorical.codes, index=self.index)
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self.categorical, name)
res = method(*args, **kwargs)
if res is not None:
return Series(res, index=self.index)
CategoricalAccessor._add_delegate_accessors(delegate=Categorical,
accessors=["categories",
"ordered"],
typ='property')
CategoricalAccessor._add_delegate_accessors(delegate=Categorical, accessors=[
"rename_categories", "reorder_categories", "add_categories",
"remove_categories", "remove_unused_categories", "set_categories",
"as_ordered", "as_unordered"], typ='method')
# utility routines
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
if not is_dtype_equal(values.dtype, categories.dtype):
values = _ensure_object(values)
categories = _ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return _coerce_indexer_dtype(t.lookup(vals), cats)
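# Illustrative sketch (an assumed example, not used by pandas itself): the same
# values -> codes idea expressed with a plain dict lookup; values that are not
# among the categories map to the sentinel code -1.
def _example_codes_for_values():
    values = np.array(['a', 'c', 'b', 'z'], dtype=object)
    categories = np.array(['a', 'b', 'c'], dtype=object)
    lookup = {cat: code for code, cat in enumerate(categories)}
    return np.array([lookup.get(v, -1) for v in values], dtype=np.int8)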
def _convert_to_list_like(list_like):
if hasattr(list_like, "dtype"):
return list_like
if isinstance(list_like, list):
return list_like
if (is_sequence(list_like) or isinstance(list_like, tuple) or
isinstance(list_like, types.GeneratorType)):
return list(list_like)
elif lib.isscalar(list_like):
return [list_like]
else:
# is this reached?
return [list_like]
| mit |
YorkUIRLab/eosdb | experiment/textClassifier-master/textClassifierRNN.py | 1 | 5840 | # author - Richard Liao
# Dec 26 2016
import numpy as np
import pandas as pd
import cPickle
from collections import defaultdict
import re
from bs4 import BeautifulSoup
import sys
import os
os.environ['KERAS_BACKEND']='theano'
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Embedding
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding, Merge, Dropout, LSTM, GRU, Bidirectional
from keras.models import Model
from keras import backend as K
from keras.engine.topology import Layer, InputSpec
from keras import initializations
MAX_SEQUENCE_LENGTH = 1000
MAX_NB_WORDS = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.2
def clean_str(string):
"""
    Tokenization/string cleaning for the dataset.
    All text is lower cased.
"""
string = re.sub(r"\\", "", string)
string = re.sub(r"\'", "", string)
string = re.sub(r"\"", "", string)
return string.strip().lower()
data_train = pd.read_csv('~/Testground/data/imdb/labeledTrainData.tsv', sep='\t')
print data_train.shape
texts = []
labels = []
for idx in range(data_train.review.shape[0]):
text = BeautifulSoup(data_train.review[idx])
texts.append(clean_str(text.get_text().encode('ascii','ignore')))
labels.append(data_train.sentiment[idx])
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = data[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
print('Training and validation set number of positive and negative reviews')
print y_train.sum(axis=0)
print y_val.sum(axis=0)
GLOVE_DIR = "~/Testground/data/glove"
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'))
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Total %s word vectors.' % len(embeddings_index))
embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=True)
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
l_lstm = Bidirectional(LSTM(100))(embedded_sequences)
preds = Dense(2, activation='softmax')(l_lstm)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
print("model fitting - Bidirectional LSTM")
model.summary()
model.fit(x_train, y_train, validation_data=(x_val, y_val),
nb_epoch=10, batch_size=50)
# Attention GRU network
class AttLayer(Layer):
def __init__(self, **kwargs):
self.init = initializations.get('normal')
#self.input_spec = [InputSpec(ndim=3)]
super(AttLayer, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape)==3
#self.W = self.init((input_shape[-1],1))
self.W = self.init((input_shape[-1],))
#self.input_spec = [InputSpec(shape=input_shape)]
self.trainable_weights = [self.W]
super(AttLayer, self).build(input_shape) # be sure you call this somewhere!
def call(self, x, mask=None):
eij = K.tanh(K.dot(x, self.W))
ai = K.exp(eij)
weights = ai/K.sum(ai, axis=1).dimshuffle(0,'x')
weighted_input = x*weights.dimshuffle(0,1,'x')
return weighted_input.sum(axis=1)
def get_output_shape_for(self, input_shape):
return (input_shape[0], input_shape[-1])
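# Illustrative numpy sketch (an assumed helper, not used by the model above): the
# same soft-attention pooling that AttLayer.call implements on the Theano
# backend, written in plain numpy so the math is easy to follow.
def _attention_pooling_numpy(x, w):
    # x: (batch, timesteps, features), w: (features,)
    eij = np.tanh(np.dot(x, w))                   # score per timestep
    ai = np.exp(eij)
    weights = ai / ai.sum(axis=1, keepdims=True)  # softmax over timesteps
    return (x * weights[:, :, None]).sum(axis=1)  # weighted sum -> (batch, features)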
embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(len(word_index) + 1,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=True)
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
l_gru = Bidirectional(GRU(100, return_sequences=True))(embedded_sequences)
l_att = AttLayer()(l_gru)
preds = Dense(2, activation='softmax')(l_att)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
print("model fitting - attention GRU network")
model.summary()
model.fit(x_train, y_train, validation_data=(x_val, y_val),
nb_epoch=10, batch_size=50)
| lgpl-3.0 |
mikebenfield/scikit-learn | sklearn/utils/tests/test_multiclass.py | 58 | 14316 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.metaestimators import _safe_split
from sklearn.model_selection import ShuffleSplit
from sklearn.svm import SVC
from sklearn import datasets
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
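# Illustrative sketch (not part of the test suite): np.asarray falls back to the
# __array__ protocol, so a NotAnArray instance behaves like the data it wraps.
def _example_not_an_array():
    wrapped = NotAnArray(np.array([1, 0, 2]))
    return np.asarray(wrapped)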
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_check_classification_targets():
for y_type in EXAMPLES.keys():
if y_type in ["unknown", "continuous", 'continuous-multioutput']:
for example in EXAMPLES[y_type]:
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg,
check_classification_targets, example)
else:
for example in EXAMPLES[y_type]:
check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = datasets.load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = ShuffleSplit(test_size=0.25, random_state=0)
train, test = list(cv.split(X))[0]
X_train, y_train = _safe_split(clf, X, y, train)
K_train, y_train2 = _safe_split(clfp, K, y, train)
assert_array_almost_equal(K_train, np.dot(X_train, X_train.T))
assert_array_almost_equal(y_train, y_train2)
X_test, y_test = _safe_split(clf, X, y, test, train)
K_test, y_test2 = _safe_split(clfp, K, y, test, train)
assert_array_almost_equal(K_test, np.dot(X_test, X_train.T))
assert_array_almost_equal(y_test, y_test2)
| bsd-3-clause |
openego/eDisGo | edisgo/flex_opt/curtailment.py | 1 | 17595 | import pandas as pd
import logging
from pyomo.environ import ConcreteModel, Set, Param, Objective, Constraint, \
minimize, Var
from pyomo.opt import SolverFactory
def voltage_based(feedin, generators, curtailment_timeseries, edisgo,
curtailment_key, **kwargs):
"""
Implements curtailment methodology 'voltage-based'.
The curtailment that has to be met in each time step is allocated depending
on the exceedance of the allowed voltage deviation at the nodes of the
generators. The higher the exceedance, the higher the curtailment.
The optional parameter `voltage_threshold` specifies the threshold for the
exceedance of the allowed voltage deviation above which a generator is
curtailed. By default it is set to zero, meaning that all generators at
nodes with voltage deviations that exceed the allowed voltage deviation are
curtailed. Generators at nodes where the allowed voltage deviation is not
exceeded are not curtailed. In the case that the required curtailment
exceeds the weather-dependent availability of all generators with voltage
deviations above the specified threshold, the voltage threshold is lowered
in steps of 0.01 p.u. until the curtailment target can be met.
Above the threshold, the curtailment is proportional to the exceedance of
the allowed voltage deviation. In order to find the linear relation between
the curtailment and the voltage difference a linear problem is formulated
and solved using the python package pyomo. See documentation for further
information.
Parameters
----------
feedin : :pandas:`pandas.DataFrame<dataframe>`
Dataframe holding the feed-in of each generator in kW for the
technology (and weather cell) specified in `curtailment_key` parameter.
Index of the dataframe is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`. Columns are the
representatives of the fluctuating generators.
generators : :pandas:`pandas.DataFrame<dataframe>`
Dataframe with all generators of the type (and in weather cell)
specified in `curtailment_key` parameter. See return value of
:func:`edisgo.grid.tools.get_gen_info` for more information.
curtailment_timeseries : :pandas:`pandas.Series<series>`
The curtailment in kW to be distributed amongst the generators in
`generators` parameter. Index of the series is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`.
edisgo : :class:`edisgo.grid.network.EDisGo`
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
The technology and weather cell ID if :obj:`tuple` or only
the technology if :obj:`str` the curtailment is specified for.
    voltage_threshold : :obj:`float`
The node voltage below which no curtailment is assigned to the
respective generator if not necessary. Default: 0.0.
    solver : :obj:`str`
The solver used to optimize the curtailment assigned to the generator.
Possible options are:
* 'cbc'
coin-or branch and cut solver
* 'glpk'
gnu linear programming kit solver
* any other available compatible with 'pyomo' like 'gurobi'
or 'cplex'
Default: 'cbc'
"""
voltage_threshold = pd.Series(kwargs.get('voltage_threshold', 0.0),
index=curtailment_timeseries.index)
solver = kwargs.get('solver', 'cbc')
combined_analysis = kwargs.get('combined_analysis', False)
# get the voltages at the generators
voltages_lv_gens = edisgo.network.results.v_res(
nodes=generators.loc[(generators.voltage_level == 'lv')].index,
level='lv')
voltages_mv_gens = edisgo.network.results.v_res(
nodes=generators.loc[(generators.voltage_level == 'mv')].index,
level='mv')
voltages_gens = voltages_lv_gens.join(voltages_mv_gens)
# get voltages at stations
grids = list(set(generators.grid))
lv_stations = [_.station for _ in grids if 'LVStation' in repr(_.station)]
voltage_lv_stations = edisgo.network.results.v_res(
nodes=lv_stations, level='lv')
voltages_mv_station = edisgo.network.results.v_res(
nodes=[edisgo.network.mv_grid.station], level='mv')
voltages_stations = voltage_lv_stations.join(voltages_mv_station)
# get allowed voltage deviations
if not combined_analysis:
allowed_voltage_dev_mv = edisgo.network.config[
'grid_expansion_allowed_voltage_deviations'][
'mv_feedin_case_max_v_deviation']
allowed_voltage_diff_lv = edisgo.network.config[
'grid_expansion_allowed_voltage_deviations'][
'lv_feedin_case_max_v_deviation']
else:
allowed_voltage_dev_mv = edisgo.network.config[
'grid_expansion_allowed_voltage_deviations'][
'mv_lv_feedin_case_max_v_deviation']
allowed_voltage_diff_lv = edisgo.network.config[
'grid_expansion_allowed_voltage_deviations'][
'mv_lv_feedin_case_max_v_deviation']
generators['allowed_voltage_dev'] = generators.voltage_level.apply(
lambda _: allowed_voltage_diff_lv if _ == 'lv'
else allowed_voltage_dev_mv)
# calculate voltage difference from generator node to station
voltage_gens_diff = pd.DataFrame()
for gen in voltages_gens.columns:
station = generators[generators.gen_repr==gen].grid.values[0].station
voltage_gens_diff[gen] = voltages_gens.loc[:, gen] - \
voltages_stations.loc[:, repr(station)] - \
generators[generators.gen_repr ==
gen].allowed_voltage_dev.iloc[0]
# for every time step check if curtailment can be fulfilled, otherwise
# reduce voltage threshold; set feed-in of generators below voltage
# threshold to zero, so that they cannot be curtailed
for ts in curtailment_timeseries.index:
# get generators with voltage higher than threshold
gen_pool = voltage_gens_diff.loc[
ts, voltage_gens_diff.loc[ts, :] > voltage_threshold.loc[ts]].index
# if curtailment cannot be fulfilled lower voltage threshold
while sum(feedin.loc[ts, gen_pool]) < curtailment_timeseries.loc[ts]:
voltage_threshold.loc[ts] = voltage_threshold.loc[ts] - 0.01
gen_pool = voltage_gens_diff.loc[
ts, voltage_gens_diff.loc[ts, :] >
voltage_threshold.loc[ts]].index
# set feed-in of generators below voltage threshold to zero, so that
# they cannot be curtailed
gen_pool_out = voltage_gens_diff.loc[
ts, voltage_gens_diff.loc[ts, :] <=
voltage_threshold.loc[ts]].index
feedin.loc[ts, gen_pool_out] = 0
# only optimize for time steps where curtailment is greater than zero
timeindex = curtailment_timeseries[curtailment_timeseries > 0].index
if not timeindex.empty:
curtailment = _optimize_voltage_based_curtailment(
feedin, voltage_gens_diff, curtailment_timeseries,
voltage_threshold, timeindex, solver)
else:
curtailment = pd.DataFrame()
# set curtailment for other time steps to zero
curtailment = curtailment.append(pd.DataFrame(
0, columns=feedin.columns, index=curtailment_timeseries[
curtailment_timeseries <= 0].index))
# check if curtailment target was met
_check_curtailment_target(curtailment, curtailment_timeseries,
curtailment_key)
# assign curtailment to individual generators
_assign_curtailment(curtailment, edisgo, generators, curtailment_key)
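# Illustrative toy sketch (an assumed example, not how eDisGo computes the final
# result): the basic idea of weighting curtailment by how far each generator's
# voltage exceeds the threshold. The actual allocation above is obtained by
# solving a linear program (see _optimize_voltage_based_curtailment below).
def _example_voltage_exceedance_weights():
    exceedance = pd.Series({'gen_1': 0.00, 'gen_2': 0.02, 'gen_3': 0.04})
    feedin = pd.Series({'gen_1': 100.0, 'gen_2': 80.0, 'gen_3': 60.0})
    target = 50.0
    allocation = (exceedance / exceedance.sum()) * target
    # cap each generator's share at its available feed-in
    return pd.concat([allocation, feedin], axis=1).min(axis=1)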
def _optimize_voltage_based_curtailment(feedin, voltage_pu, total_curtailment,
voltage_threshold, timeindex, solver):
"""
Formulates and solves linear problem to find linear relation between
curtailment and node voltage.
Parameters
------------
feedin : :pandas:`pandas.DataFrame<dataframe>`
See `feedin` parameter in
:func:`edisgo.flex_opt.curtailment.voltage_based` for more information.
voltage_pu : :pandas:`pandas.DataFrame<dataframe>
Dataframe containing voltages in p.u. at the generator nodes. Index
of the dataframe is a :pandas:`pandas.DatetimeIndex<datetimeindex>`,
columns are the generator representatives.
total_curtailment : :pandas:`pandas.Series<series>`
Series containing the specific curtailment in kW to be allocated to the
generators. The index is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`.
voltage_threshold : :pandas:`pandas.Series<series>`
Series containing the voltage thresholds in p.u. below which no
generator curtailment will occur. The index is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`.
solver : :obj:`str`
The solver used to optimize the linear problem. Default: 'cbc'.
Returns
-------
:pandas:`pandas:DataFrame<dataframe>`
Dataframe containing the curtailment in kW per generator and time step
feed-in was provided for in `feedin` parameter. Index is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are the
generator representatives.
"""
logging.debug("Start curtailment optimization.")
v_max = voltage_pu.max(axis=1)
generators = feedin.columns
# additional curtailment factors
cf_add = pd.DataFrame(index=timeindex)
for gen in generators:
cf_add[gen] = abs(
(voltage_pu.loc[timeindex, gen] - v_max[timeindex]) / (
voltage_threshold[timeindex] - v_max[timeindex]))
# curtailment factors
cf = pd.DataFrame(index=timeindex)
for gen in generators:
cf[gen] = abs(
(voltage_pu.loc[timeindex, gen] - voltage_threshold[timeindex]) / (
v_max[timeindex] - voltage_threshold[timeindex]))
# initialize model
model = ConcreteModel()
# add sets
model.T = Set(initialize=timeindex)
model.G = Set(initialize=generators)
# add parameters
def feedin_init(model, t, g):
return feedin.loc[t, g]
model.feedin = Param(model.T, model.G, initialize=feedin_init)
def voltage_pu_init(model, t, g):
return voltage_pu.loc[t, g]
model.voltage_pu = Param(model.T, model.G, initialize=voltage_pu_init)
def cf_add_init(model, t, g):
return cf_add.loc[t, g]
model.cf_add = Param(model.T, model.G, initialize=cf_add_init)
def cf_init(model, t, g):
return cf.loc[t, g]
model.cf = Param(model.T, model.G, initialize=cf_init)
def total_curtailment_init(model, t):
return total_curtailment.loc[t]
model.total_curtailment = Param(model.T, initialize=total_curtailment_init)
# add variables
model.offset = Var(model.T, bounds=(0, 1))
model.cf_max = Var(model.T, bounds=(0, 1))
def curtailment_init(model, t, g):
return (0, feedin.loc[t, g])
model.c = Var(model.T, model.G, bounds=curtailment_init)
# add objective
def obj_rule(model):
expr = (sum(model.offset[t] * 100
for t in model.T))
return expr
model.obj = Objective(rule=obj_rule, sense=minimize)
# add constraints
# curtailment per generator constraints
def curtail(model, t, g):
return (
model.cf[t, g] * model.cf_max[t] * model.feedin[t, g] + model.cf_add[
t, g] * model.offset[t] * model.feedin[t, g] - model.c[t, g] == 0)
model.curtailment = Constraint(model.T, model.G, rule=curtail)
# total curtailment constraint
def total_curtailment(model, t):
return (
sum(model.c[t, g] for g in model.G) == model.total_curtailment[t])
model.sum_curtailment = Constraint(model.T, rule=total_curtailment)
# solve
solver = SolverFactory(solver)
results = solver.solve(model, tee=False)
# load results back into model
model.solutions.load_from(results)
return pd.DataFrame({g: [model.c[t, g].value for t in model.T]
for g in model.G}, index=model.T)
def feedin_proportional(feedin, generators, curtailment_timeseries, edisgo,
curtailment_key, **kwargs):
"""
Implements curtailment methodology 'feedin-proportional'.
The curtailment that has to be met in each time step is allocated
equally to all generators depending on their share of total
feed-in in that time step.
Parameters
----------
feedin : :pandas:`pandas.DataFrame<dataframe>`
Dataframe holding the feed-in of each generator in kW for the
technology (and weather cell) specified in `curtailment_key` parameter.
Index of the dataframe is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`. Columns are the
representatives of the fluctuating generators.
generators : :pandas:`pandas.DataFrame<dataframe>`
Dataframe with all generators of the type (and in weather cell)
specified in `curtailment_key` parameter. See return value of
:func:`edisgo.grid.tools.get_gen_info` for more information.
curtailment_timeseries : :pandas:`pandas.Series<series>`
The curtailment in kW to be distributed amongst the generators in
`generators` parameter. Index of the series is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`.
edisgo : :class:`edisgo.grid.network.EDisGo`
curtailment_key::obj:`str` or :obj:`tuple` with :obj:`str`
The technology and weather cell ID if :obj:`tuple` or only
the technology if :obj:`str` the curtailment is specified for.
"""
# calculate curtailment in each time step of each generator
curtailment = feedin.divide(feedin.sum(axis=1), axis=0). \
multiply(curtailment_timeseries, axis=0)
# substitute NaNs from division with 0 by 0
curtailment.fillna(0, inplace=True)
# check if curtailment target was met
_check_curtailment_target(curtailment, curtailment_timeseries,
curtailment_key)
# assign curtailment to individual generators
_assign_curtailment(curtailment, edisgo, generators, curtailment_key)
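# Illustrative toy sketch (an assumed example, not part of eDisGo): the same
# proportional allocation as feedin_proportional, on a hand-made feed-in frame.
def _example_feedin_proportional():
    feedin = pd.DataFrame({'gen_1': [10.0, 0.0], 'gen_2': [30.0, 20.0]})
    target = pd.Series([8.0, 5.0])
    curtailment = feedin.divide(feedin.sum(axis=1), axis=0).multiply(
        target, axis=0)
    # rows where total feed-in is zero produce NaNs from the division
    return curtailment.fillna(0)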
def _check_curtailment_target(curtailment, curtailment_target,
curtailment_key):
"""
Raises an error if curtailment target was not met in any time step.
Parameters
-----------
curtailment : :pandas:`pandas:DataFrame<dataframe>`
Dataframe containing the curtailment in kW per generator and time step.
Index is a :pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are
the generator representatives.
curtailment_target : :pandas:`pandas.Series<series>`
The curtailment in kW that was to be distributed amongst the
generators. Index of the series is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`.
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
The technology and weather cell ID if :obj:`tuple` or only
the technology if :obj:`str` the curtailment was specified for.
"""
if not (abs(curtailment.sum(axis=1) - curtailment_target) < 1e-1).all():
message = 'Curtailment target not met for {}.'.format(curtailment_key)
logging.error(message)
raise TypeError(message)
def _assign_curtailment(curtailment, edisgo, generators, curtailment_key):
"""
Helper function to write curtailment time series to generator objects.
This function also writes a list of the curtailed generators to curtailment
in :class:`edisgo.grid.network.TimeSeries` and
:class:`edisgo.grid.network.Results`.
Parameters
----------
curtailment : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing the curtailment in kW per generator and time step
for all generators of the type (and in weather cell) specified in
`curtailment_key` parameter. Index is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are the
generator representatives.
edisgo : :class:`edisgo.grid.network.EDisGo`
generators : :pandas:`pandas.DataFrame<dataframe>`
Dataframe with all generators of the type (and in weather cell)
specified in `curtailment_key` parameter. See return value of
:func:`edisgo.grid.tools.get_gen_info` for more information.
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
The technology and weather cell ID if :obj:`tuple` or only
the technology if :obj:`str` the curtailment is specified for.
"""
gen_object_list = []
for gen in curtailment.columns:
# get generator object from representative
gen_object = generators.loc[generators.gen_repr == gen].index[0]
# assign curtailment to individual generators
gen_object.curtailment = curtailment.loc[:, gen]
gen_object_list.append(gen_object)
# set timeseries.curtailment
if edisgo.network.timeseries._curtailment:
edisgo.network.timeseries._curtailment.extend(gen_object_list)
edisgo.network.results._curtailment[curtailment_key] = \
gen_object_list
else:
edisgo.network.timeseries._curtailment = gen_object_list
# list needs to be copied, otherwise it will be extended every time
# a new key is added to results._curtailment
edisgo.network.results._curtailment = \
{curtailment_key: gen_object_list.copy()}
| agpl-3.0 |
ImAlexisSaez/deep-learning-specialization-coursera | course_1/week_4/assignment_2/dnn_app_utils.py | 1 | 14746 | import numpy as np
import matplotlib.pyplot as plt
import h5py
def sigmoid(Z):
"""
Implements the sigmoid activation in numpy
Arguments:
Z -- numpy array of any shape
Returns:
A -- output of sigmoid(z), same shape as Z
cache -- returns Z as well, useful during backpropagation
"""
A = 1/(1+np.exp(-Z))
cache = Z
return A, cache
def relu(Z):
"""
Implement the RELU function.
Arguments:
Z -- Output of the linear layer, of any shape
Returns:
A -- Post-activation parameter, of the same shape as Z
cache -- a python dictionary containing "A" ; stored for computing the backward pass efficiently
"""
A = np.maximum(0,Z)
assert(A.shape == Z.shape)
cache = Z
return A, cache
def relu_backward(dA, cache):
"""
Implement the backward propagation for a single RELU unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
dZ = np.array(dA, copy=True) # just converting dz to a correct object.
# When z <= 0, you should set dz to 0 as well.
dZ[Z <= 0] = 0
assert (dZ.shape == Z.shape)
return dZ
def sigmoid_backward(dA, cache):
"""
Implement the backward propagation for a single SIGMOID unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
s = 1/(1+np.exp(-Z))
dZ = dA * s * (1-s)
assert (dZ.shape == Z.shape)
return dZ
def load_data():
train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
parameters -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(1)
W1 = np.random.randn(n_h, n_x)*0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h)*0.01
b2 = np.zeros((n_y, 1))
assert(W1.shape == (n_h, n_x))
assert(b1.shape == (n_h, 1))
assert(W2.shape == (n_y, n_h))
assert(b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
def initialize_parameters_deep(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
"""
np.random.seed(1)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) / np.sqrt(layer_dims[l-1]) #*0.01
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters
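# For example (a sketch, not called anywhere): initialize_parameters_deep([5, 4, 3])
# returns W1 with shape (4, 5), b1 with shape (4, 1), W2 with shape (3, 4) and
# b2 with shape (3, 1).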
def linear_forward(A, W, b):
"""
Implement the linear part of a layer's forward propagation.
Arguments:
A -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
Returns:
Z -- the input of the activation function, also called pre-activation parameter
cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently
"""
Z = W.dot(A) + b
assert(Z.shape == (W.shape[0], A.shape[1]))
cache = (A, W, b)
return Z, cache
def linear_activation_forward(A_prev, W, b, activation):
"""
Implement the forward propagation for the LINEAR->ACTIVATION layer
Arguments:
A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
A -- the output of the activation function, also called the post-activation value
cache -- a python dictionary containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
"""
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = relu(Z)
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache)
return A, cache
def L_model_forward(X, parameters):
"""
Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation
Arguments:
X -- data, numpy array of shape (input size, number of examples)
parameters -- output of initialize_parameters_deep()
Returns:
AL -- last post-activation value
caches -- list of caches containing:
every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2)
the cache of linear_sigmoid_forward() (there is one, indexed L-1)
"""
caches = []
A = X
L = len(parameters) // 2 # number of layers in the neural network
# Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
for l in range(1, L):
A_prev = A
A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], activation = "relu")
caches.append(cache)
# Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], activation = "sigmoid")
caches.append(cache)
assert(AL.shape == (1,X.shape[1]))
return AL, caches
def compute_cost(AL, Y):
"""
Implement the cost function defined by equation (7).
Arguments:
AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)
Returns:
cost -- cross-entropy cost
"""
m = Y.shape[1]
# Compute loss from aL and y.
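    # cross-entropy: cost = -(1/m) * sum(Y*log(AL) + (1-Y)*log(1-AL))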
cost = (1./m) * (-np.dot(Y,np.log(AL).T) - np.dot(1-Y, np.log(1-AL).T))
cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).
assert(cost.shape == ())
return cost
def linear_backward(dZ, cache):
"""
Implement the linear portion of backward propagation for a single layer (layer l)
Arguments:
dZ -- Gradient of the cost with respect to the linear output (of current layer l)
cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
A_prev, W, b = cache
m = A_prev.shape[1]
dW = 1./m * np.dot(dZ,A_prev.T)
    # sum over the examples axis so that db keeps the same (layer size, 1) shape as b
    db = 1./m * np.sum(dZ, axis=1, keepdims=True)
    dA_prev = np.dot(W.T,dZ)
    assert (dA_prev.shape == A_prev.shape)
    assert (dW.shape == W.shape)
    assert (db.shape == b.shape)
return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
"""
Implement the backward propagation for the LINEAR->ACTIVATION layer.
Arguments:
dA -- post-activation gradient for current layer l
cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
"""
linear_cache, activation_cache = cache
if activation == "relu":
dZ = relu_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
elif activation == "sigmoid":
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, linear_cache)
return dA_prev, dW, db
def L_model_backward(AL, Y, caches):
"""
Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group
Arguments:
AL -- probability vector, output of the forward propagation (L_model_forward())
Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
caches -- list of caches containing:
every cache of linear_activation_forward() with "relu" (there are (L-1) or them, indexes from 0 to L-2)
the cache of linear_activation_forward() with "sigmoid" (there is one, index L-1)
Returns:
grads -- A dictionary with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
"""
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
# Initializing the backpropagation
dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    # Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]"
current_cache = caches[L-1]
grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation = "sigmoid")
for l in reversed(range(L-1)):
# lth layer: (RELU -> LINEAR) gradients.
current_cache = caches[l]
dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 2)], current_cache, activation = "relu")
grads["dA" + str(l + 1)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
return grads
def update_parameters(parameters, grads, learning_rate):
"""
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients, output of L_model_backward
Returns:
parameters -- python dictionary containing your updated parameters
parameters["W" + str(l)] = ...
parameters["b" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural network
# Update rule for each parameter. Use a for loop.
for l in range(L):
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * grads["dW" + str(l+1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * grads["db" + str(l+1)]
return parameters
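def _example_training_loop(num_iterations=10, learning_rate=0.0075):
    """
    Illustrative sketch (not part of the original helpers): shows how the
    functions above compose into a plain gradient-descent loop. The toy data
    shapes and layer sizes are assumptions made for this example only.
    """
    np.random.seed(1)
    X = np.random.randn(20, 15)                    # 20 features, 15 examples
    Y = (np.random.rand(1, 15) > 0.5).astype(int)  # binary labels
    parameters = initialize_parameters_deep([20, 7, 5, 1])
    for i in range(num_iterations):
        AL, caches = L_model_forward(X, parameters)
        cost = compute_cost(AL, Y)
        grads = L_model_backward(AL, Y, caches)
        parameters = update_parameters(parameters, grads, learning_rate)
    return parameters, cost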
def predict(X, y, parameters):
"""
This function is used to predict the results of a L-layer neural network.
Arguments:
X -- data set of examples you would like to label
parameters -- parameters of the trained model
Returns:
p -- predictions for the given dataset X
"""
m = X.shape[1]
n = len(parameters) // 2 # number of layers in the neural network
p = np.zeros((1,m))
# Forward propagation
probas, caches = L_model_forward(X, parameters)
# convert probas to 0/1 predictions
for i in range(0, probas.shape[1]):
if probas[0,i] > 0.5:
p[0,i] = 1
else:
p[0,i] = 0
#print results
#print ("predictions: " + str(p))
#print ("true labels: " + str(y))
print("Accuracy: " + str(np.sum((p == y)/m)))
return p
def print_mislabeled_images(classes, X, y, p):
"""
Plots images where predictions and truth were different.
X -- dataset
y -- true labels
p -- predictions
"""
a = p + y
mislabeled_indices = np.asarray(np.where(a == 1))
plt.rcParams['figure.figsize'] = (40.0, 40.0) # set default size of plots
num_images = len(mislabeled_indices[0])
for i in range(num_images):
index = mislabeled_indices[1][i]
plt.subplot(2, num_images, i + 1)
plt.imshow(X[:,index].reshape(64,64,3), interpolation='nearest')
plt.axis('off')
plt.title("Prediction: " + classes[p[0,index]].decode("utf-8") + " \n Class: " + classes[y[0,index]].decode("utf-8"))
| mit |
akionakamura/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be iterable an (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
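# Illustrative sketch (an assumed example, not part of the original module):
# hashing raw token lists with input_type="string"; each token contributes an
# implied value of 1, and repeated tokens accumulate in their hashed column.
def _example_string_input():
    hasher = FeatureHasher(n_features=8, input_type="string")
    X = hasher.transform([["dog", "cat", "dog"], ["run"]])
    return X.toarray()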
| bsd-3-clause |
DSLituiev/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
cython-testbed/pandas | pandas/tests/series/test_quantile.py | 2 | 6305 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
from pandas import Index, Series
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.dtypes.common import is_integer
import pandas.util.testing as tm
from .common import TestData
class TestSeriesQuantile(TestData):
def test_quantile(self):
q = self.ts.quantile(0.1)
assert q == np.percentile(self.ts.dropna(), 10)
q = self.ts.quantile(0.9)
assert q == np.percentile(self.ts.dropna(), 90)
# object dtype
q = Series(self.ts, dtype=object).quantile(0.9)
assert q == np.percentile(self.ts.dropna(), 90)
# datetime64[ns] dtype
dts = self.ts.index.to_series()
q = dts.quantile(.2)
assert q == Timestamp('2000-01-10 19:12:00')
# timedelta64[ns] dtype
tds = dts.diff()
q = tds.quantile(.25)
assert q == pd.to_timedelta('24:00:00')
# GH7661
result = Series([np.timedelta64('NaT')]).sum()
assert result == pd.Timedelta(0)
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assert_raises_regex(ValueError, msg):
self.ts.quantile(invalid)
def test_quantile_multi(self):
qs = [.1, .9]
result = self.ts.quantile(qs)
expected = pd.Series([np.percentile(self.ts.dropna(), 10),
np.percentile(self.ts.dropna(), 90)],
index=qs, name=self.ts.name)
tm.assert_series_equal(result, expected)
dts = self.ts.index.to_series()
dts.name = 'xxx'
result = dts.quantile((.2, .2))
expected = Series([Timestamp('2000-01-10 19:12:00'),
Timestamp('2000-01-10 19:12:00')],
index=[.2, .2], name='xxx')
tm.assert_series_equal(result, expected)
result = self.ts.quantile([])
expected = pd.Series([], name=self.ts.name, index=Index(
[], dtype=float))
tm.assert_series_equal(result, expected)
def test_quantile_interpolation(self):
# see gh-10174
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
assert q == np.percentile(self.ts.dropna(), 10)
q1 = self.ts.quantile(0.1)
assert q1 == np.percentile(self.ts.dropna(), 10)
# test with and without interpolation keyword
assert q == q1
def test_quantile_interpolation_dtype(self):
# GH #10174
# interpolation = linear (default case)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='lower')
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='higher')
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
def test_quantile_nan(self):
# GH 13098
s = pd.Series([1, 2, 3, 4, np.nan])
result = s.quantile(0.5)
expected = 2.5
assert result == expected
# all nan/empty
cases = [Series([]), Series([np.nan, np.nan])]
for s in cases:
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
tm.assert_series_equal(res, pd.Series([np.nan], index=[0.5]))
res = s.quantile([0.2, 0.3])
tm.assert_series_equal(res, pd.Series([np.nan, np.nan],
index=[0.2, 0.3]))
@pytest.mark.parametrize('case', [
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
[pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
[pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')],
# NaT
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'), pd.NaT],
[pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern'), pd.NaT],
[pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days'), pd.NaT]])
def test_quantile_box(self, case):
s = pd.Series(case, name='XXX')
res = s.quantile(0.5)
assert res == case[1]
res = s.quantile([0.5])
exp = pd.Series([case[1]], index=[0.5], name='XXX')
tm.assert_series_equal(res, exp)
def test_datetime_timedelta_quantiles(self):
# covers #9694
assert pd.isna(Series([], dtype='M8[ns]').quantile(.5))
assert pd.isna(Series([], dtype='m8[ns]').quantile(.5))
def test_quantile_nat(self):
res = Series([pd.NaT, pd.NaT]).quantile(0.5)
assert res is pd.NaT
res = Series([pd.NaT, pd.NaT]).quantile([0.5])
tm.assert_series_equal(res, pd.Series([pd.NaT], index=[0.5]))
@pytest.mark.parametrize('values, dtype', [
([0, 0, 0, 1, 2, 3], 'Sparse[int]'),
([0., None, 1., 2.], 'Sparse[float]'),
])
def test_quantile_sparse(self, values, dtype):
ser = pd.Series(values, dtype=dtype)
result = ser.quantile([0.5])
expected = pd.Series(np.asarray(ser)).quantile([0.5])
tm.assert_series_equal(result, expected)
def test_quantile_empty(self):
# floats
s = Series([], dtype='float64')
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
# int
s = Series([], dtype='int64')
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
# datetime
s = Series([], dtype='datetime64[ns]')
res = s.quantile(0.5)
assert res is pd.NaT
res = s.quantile([0.5])
exp = Series([pd.NaT], index=[0.5])
tm.assert_series_equal(res, exp)
| bsd-3-clause |
vantares/trading-with-python | cookbook/reconstructVXX/downloadVixFutures.py | 77 | 3012 | #-------------------------------------------------------------------------------
# Name: download CBOE futures
# Purpose: get VIX futures data from CBOE, process data to a single file
#
#
# Created: 15-10-2011
# Copyright: (c) Jev Kuznetsov 2011
# Licence: BSD
#-------------------------------------------------------------------------------
#!/usr/bin/env python
from urllib import urlretrieve
import os
from pandas import *
import datetime
import numpy as np
m_codes = ['F','G','H','J','K','M','N','Q','U','V','X','Z'] #month codes of the futures
codes = dict(zip(m_codes,range(1,len(m_codes)+1)))
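# e.g. codes == {'F': 1, 'G': 2, ..., 'X': 11, 'Z': 12}: each CBOE futures
# month code mapped to its calendar month number.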
#dataDir = os.path.dirname(__file__)+'/data'
dataDir = os.path.expanduser('~')+'/twpData/vixFutures'
print 'Data directory: ', dataDir
def saveVixFutureData(year,month, path, forceDownload=False):
''' Get future from CBOE and save to file '''
fName = "CFE_{0}{1}_VX.csv".format(m_codes[month],str(year)[-2:])
if os.path.exists(os.path.join(path, fName)) and not forceDownload:
print 'File already downloaded, skipping'
return
urlStr = "http://cfe.cboe.com/Publish/ScheduledTask/MktData/datahouse/{0}".format(fName)
print 'Getting: %s' % urlStr
try:
urlretrieve(urlStr, os.path.join(path, fName))
except Exception as e:
print e
def buildDataTable(dataDir):
""" create single data sheet """
files = os.listdir(dataDir)
data = {}
for fName in files:
print 'Processing: ', fName
try:
df = DataFrame.from_csv(dataDir+'/'+fName)
code = fName.split('.')[0].split('_')[1]
month = '%02d' % codes[code[0]]
year = '20'+code[1:]
newCode = year+'_'+month
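# e.g. 'CFE_X11_VX.csv' -> code 'X11' -> month '11', year '2011' -> newCode '2011_11'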
data[newCode] = df
except Exception as e:
print 'Could not process:', e
full = DataFrame()
for k,df in data.iteritems():
s = df['Settle']
s.name = k
s[s<5] = np.nan
if len(s.dropna())>0:
full = full.join(s,how='outer')
else:
print s.name, ': Empty dataset.'
full[full<5]=np.nan
full = full[sorted(full.columns)]
# use only data after this date
startDate = datetime.datetime(2008,1,1)
idx = full.index >= startDate
full = full.ix[idx,:]
#full.plot(ax=gca())
fName = os.path.expanduser('~')+'/twpData/vix_futures.csv'
print 'Saving to ', fName
full.to_csv(fName)
if __name__ == '__main__':
if not os.path.exists(dataDir):
print 'creating data directory %s' % dataDir
os.makedirs(dataDir)
for year in range(2008,2013):
for month in range(12):
print 'Getting data for {0}/{1}'.format(year,month+1)
saveVixFutureData(year,month,dataDir)
print 'Raw data was saved to {0}'.format(dataDir)
buildDataTable(dataDir) | bsd-3-clause |
louispotok/pandas | pandas/tests/test_panel.py | 1 | 105261 | # -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from warnings import catch_warnings
from datetime import datetime
import operator
import pytest
import numpy as np
from pandas.core.dtypes.common import is_float_dtype
from pandas import (Series, DataFrame, Index, date_range, isna, notna,
pivot, MultiIndex)
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.io.formats.printing import pprint_thing
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas.tseries.offsets import BDay, MonthEnd
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
ensure_clean, makeMixedDataFrame,
makeCustomDataframe as mkdf)
import pandas.core.panel as panelm
import pandas.util.testing as tm
import pandas.util._test_decorators as td
def make_test_panel():
with catch_warnings(record=True):
_panel = tm.makePanel()
tm.add_nans(_panel)
_panel = _panel.copy()
return _panel
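# Builds a small float Panel via tm.makePanel() and inserts NaNs with
# tm.add_nans(), so the tests below also exercise missing-data handling.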
class PanelTests(object):
panel = None
def test_pickle(self):
with catch_warnings(record=True):
unpickled = tm.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
with catch_warnings(record=True):
pytest.raises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
with catch_warnings(record=True):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
with catch_warnings(record=True):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
pytest.raises(TypeError, hash, c_empty)
pytest.raises(TypeError, hash, c)
class SafeForLongAndSparse(object):
def test_repr(self):
repr(self.panel)
def test_copy_names(self):
with catch_warnings(record=True):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
assert getattr(self.panel, attr).name is None
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notna(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum, skipna_alternative=np.nansum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
@td.skip_if_no("numpy", min_version="1.10.0")
def test_prod(self):
self._check_stat_op('prod', np.prod, skipna_alternative=np.nanprod)
def test_median(self):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
with catch_warnings(record=True):
self._check_stat_op('min', np.min)
def test_max(self):
with catch_warnings(record=True):
self._check_stat_op('max', np.max)
@td.skip_if_no_scipy
def test_skew(self):
from scipy.stats import skew
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True,
skipna_alternative=None):
if obj is None:
obj = self.panel
# # set some NAs
# obj.loc[5:10] = np.nan
# obj.loc[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if name in ['sum', 'prod']:
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
pytest.raises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
tm.assert_raises_regex(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure the change propagates to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
assert 'ItemA' not in self.panel._item_cache
assert self.panel.items is new_items
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
assert self.panel[0].index is new_major
assert self.panel.major_axis is new_major
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
assert self.panel[0].columns is new_minor
assert self.panel.minor_axis is new_minor
def test_get_axis_number(self):
assert self.panel._get_axis_number('items') == 0
assert self.panel._get_axis_number('major') == 1
assert self.panel._get_axis_number('minor') == 2
with tm.assert_raises_regex(ValueError, "No axis named foo"):
self.panel._get_axis_number('foo')
with tm.assert_raises_regex(ValueError, "No axis named foo"):
self.panel.__ge__(self.panel, axis='foo')
def test_get_axis_name(self):
assert self.panel._get_axis_name(0) == 'items'
assert self.panel._get_axis_name(1) == 'major_axis'
assert self.panel._get_axis_name(2) == 'minor_axis'
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
def test_truncate(self):
with catch_warnings(record=True):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
def test_arith(self):
with catch_warnings(record=True):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
pytest.raises(Exception, self.panel.__add__,
self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
assert len(list(self.panel.iteritems())) == len(self.panel.items)
def test_combineFrame(self):
with catch_warnings(record=True):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(
result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow', 'mod']
if not compat.PY3:
ops.append('div')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
pprint_thing("Failing operation: %r" % 'div')
raise
def test_combinePanel(self):
with catch_warnings(record=True):
result = self.panel.add(self.panel)
assert_panel_equal(result, self.panel * 2)
def test_neg(self):
with catch_warnings(record=True):
assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
with catch_warnings(record=True):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).iloc[0]
ops = ['add', 'sub', 'mul', 'truediv',
'floordiv', 'div', 'mod', 'pow']
for op in ops:
with pytest.raises(NotImplementedError):
getattr(p, op)(d, axis=0)
def test_select(self):
with catch_warnings(record=True):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(
2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
def test_abs(self):
with catch_warnings(record=True):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
assert_panel_equal(result, expected)
assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
assert result.name == 'A'
assert result2.name == 'A'
class CheckIndexing(object):
def test_getitem(self):
pytest.raises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
with catch_warnings(record=True):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
assert 'ItemA' not in self.panel.items
del self.panel['ItemB']
assert 'ItemB' not in self.panel.items
pytest.raises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
tm.assert_frame_equal(panelc[0], panel[0])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
with catch_warnings(record=True):
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with pytest.raises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(
index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
assert self.panel['ItemG'].values.dtype == np.int64
assert self.panel['ItemE'].values.dtype == np.bool_
# object dtype
self.panel['ItemQ'] = 'foo'
assert self.panel['ItemQ'].values.dtype == np.object_
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
assert self.panel['ItemP'].values.dtype == np.bool_
pytest.raises(TypeError, self.panel.__setitem__, 'foo',
self.panel.loc[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assert_raises_regex(ValueError,
r"shape of value must be "
r"\(3, 2\), shape of given "
r"object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
with catch_warnings(record=True):
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
with catch_warnings(record=True):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notna(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notna(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
with catch_warnings(record=True):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
assert result.name == 'ItemA'
# not contained
idx = self.panel.major_axis[0] - BDay()
pytest.raises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
with catch_warnings(record=True):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
assert xs['ItemA'].dtype == np.float64
assert xs['ItemD'].dtype == np.object_
def test_minor_xs(self):
with catch_warnings(record=True):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
pytest.raises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
with catch_warnings(record=True):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
assert xs['ItemA'].dtype == np.float64
assert xs['ItemD'].dtype == np.object_
def test_xs(self):
with catch_warnings(record=True):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
tm.assert_frame_equal(itemA, expected)
# Get a view by default.
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
assert np.isnan(self.panel['ItemA'].values).all()
# Mixed-type yields a copy.
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
assert result._is_copy is not None
def test_getitem_fancy_labels(self):
with catch_warnings(record=True):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.loc[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.loc[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.loc[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.loc[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.loc[items, :, :], p.reindex(items=items))
assert_panel_equal(p.loc[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.loc[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.iloc[:, -1, :]
expected = p.loc[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.loc[item], p[item])
assert_frame_equal(p.loc[item, :], p[item])
assert_frame_equal(p.loc[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.loc[:, date], p.major_xs(date))
assert_frame_equal(p.loc[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.loc[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.loc[item, date], p[item].loc[date])
assert_series_equal(p.loc[item, date, :], p[item].loc[date])
assert_series_equal(p.loc[item, :, col], p[item][col])
assert_series_equal(p.loc[:, date, col], p.major_xs(date).loc[col])
def test_getitem_fancy_xs_check_view(self):
with catch_warnings(record=True):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_getitem_callable(self):
with catch_warnings(record=True):
p = self.panel
# GH 12533
assert_frame_equal(p[lambda x: 'ItemB'], p.loc['ItemB'])
assert_panel_equal(p[lambda x: ['ItemB', 'ItemC']],
p.loc[['ItemB', 'ItemC']])
def test_ix_setitem_slice_dataframe(self):
with catch_warnings(record=True):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.loc[:, 22, [111, 333]] = b
assert_frame_equal(a.loc[:, 22, [111, 333]], b)
def test_ix_align(self):
with catch_warnings(record=True):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort_values()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.loc[0, :, 0] = b
assert_series_equal(df.loc[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.loc[:, 0, 0] = b
assert_series_equal(df.loc[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.loc[0, 0, :] = b
assert_series_equal(df.loc[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
with catch_warnings(record=True):
p_orig = tm.makePanel()
df = p_orig.iloc[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, [0, 1, 3, 5], -2:] = df
out = p.iloc[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.loc[indexer]
obj.values[:] = 0
assert (obj.values == 0).all()
comp(cp.loc[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
with catch_warnings(record=True):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
with catch_warnings(record=True):
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
with catch_warnings(record=True):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
with catch_warnings(record=True):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
tm.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
pytest.raises(Exception, func, p1, tp)
# versus different objs
pytest.raises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
tm.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
with np.errstate(invalid='ignore'):
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
with catch_warnings(record=True):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assert_raises_regex(TypeError,
"There must be an argument "
"for each axis"):
self.panel.get_value('a')
def test_set_value(self):
with catch_warnings(record=True):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
tm.assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
assert isinstance(res, Panel)
assert res is not self.panel
assert res.get_value('ItemE', 'foo', 'bar') == 1.5
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
assert is_float_dtype(res3['ItemE'].values)
msg = ("There must be an argument for each "
"axis plus the value provided")
with tm.assert_raises_regex(TypeError, msg):
self.panel.set_value('a')
class TestPanel(PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
def setup_method(self, method):
self.panel = make_test_panel()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_constructor(self):
with catch_warnings(record=True):
# with BlockManager
wp = Panel(self.panel._data)
assert wp._data is self.panel._data
wp = Panel(self.panel._data, copy=True)
assert wp._data is not self.panel._data
tm.assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
assert wp.values.dtype == np.object_
vals = self.panel.values
# no copy
wp = Panel(vals)
assert wp.values is vals
# copy
wp = Panel(vals, copy=True)
assert wp.values is not vals
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
tm.assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3),
minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
tm.assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
with catch_warnings(record=True):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
pytest.raises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
with catch_warnings(record=True):
empty = Panel()
assert len(empty.items) == 0
assert len(empty.major_axis) == 0
assert len(empty.minor_axis) == 0
def test_constructor_observe_dtype(self):
with catch_warnings(record=True):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
assert panel.values.dtype == np.object_
def test_constructor_dtypes(self):
with catch_warnings(record=True):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
assert panel[i].values.dtype.name == dtype
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(
np.random.randn(2, 10, 5),
items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5),
dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with catch_warnings(record=True):
with tm.assert_raises_regex(ValueError, "The number of dimensions required is 3"): # noqa
Panel(np.random.randn(10, 2))
def test_consolidate(self):
with catch_warnings(record=True):
assert self.panel._data.is_consolidated()
self.panel['foo'] = 1.
assert not self.panel._data.is_consolidated()
panel = self.panel._consolidate()
assert panel._data.is_consolidated()
def test_ctor_dict(self):
with catch_warnings(record=True):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
tm.assert_index_equal(wp.major_axis, self.panel.major_axis)
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
tm.assert_index_equal(wp.major_axis, itemb.index[5:])
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = {k: v.reindex(wp.major_axis).fillna(0)
for k, v in compat.iteritems(d)}
result = Panel(dcasted, dtype=int)
expected = Panel({k: v.astype(int)
for k, v in compat.iteritems(dcasted)})
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel({k: v.astype(np.int32)
for k, v in compat.iteritems(dcasted)})
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
with catch_warnings(record=True):
data = {k: v.values for k, v in self.panel.iteritems()}
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
tm.assert_index_equal(result.major_axis, exp_major)
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner, blow up
data['ItemB'] = data['ItemB'][:-1]
pytest.raises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
pytest.raises(Exception, Panel, data)
def test_ctor_orderedDict(self):
with catch_warnings(record=True):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
assert list(p.items) == keys
p = Panel.from_dict(d)
assert list(p.items) == keys
def test_constructor_resize(self):
with catch_warnings(record=True):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items,
major_axis=major, minor_axis=minor)
expected = self.panel.reindex(
items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
with catch_warnings(record=True):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
assert panel['foo'].values.dtype == np.object_
assert panel['A'].values.dtype == np.float64
def test_constructor_error_msgs(self):
with catch_warnings(record=True):
def testit():
Panel(np.random.randn(3, 4, 5),
lrange(4), lrange(5), lrange(5))
tm.assert_raises_regex(ValueError,
r"Shape of passed values is "
r"\(3, 4, 5\), indices imply "
r"\(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5),
lrange(5), lrange(4), lrange(5))
tm.assert_raises_regex(ValueError,
r"Shape of passed values is "
r"\(3, 4, 5\), indices imply "
r"\(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5),
lrange(5), lrange(5), lrange(4))
tm.assert_raises_regex(ValueError,
r"Shape of passed values is "
r"\(3, 4, 5\), indices imply "
r"\(5, 5, 4\)",
testit)
def test_conform(self):
with catch_warnings(record=True):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
tm.assert_index_equal(conformed.index, self.panel.major_axis)
tm.assert_index_equal(conformed.columns, self.panel.minor_axis)
def test_convert_objects(self):
with catch_warnings(record=True):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_astype(self):
with catch_warnings(record=True):
# GH7271
data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
panel = Panel(data, ['a', 'b'], ['c', 'd'], ['e', 'f'])
str_data = np.array([[['1', '2'], ['3', '4']],
[['5', '6'], ['7', '8']]])
expected = Panel(str_data, ['a', 'b'], ['c', 'd'], ['e', 'f'])
assert_panel_equal(panel.astype(str), expected)
pytest.raises(NotImplementedError, panel.astype, {0: str})
def test_apply(self):
with catch_warnings(record=True):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
with np.errstate(invalid='ignore'):
expected = np.sqrt(self.panel.values)
assert_almost_equal(applied.values, expected)
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'),
index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'),
index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'),
index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(
lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
with catch_warnings(record=True):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
# make sure that we don't trigger any warnings
with catch_warnings(record=True):
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel({ax: f(self.panel.loc[:, :, ax])
for ax in self.panel.minor_axis})
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel({ax: f(self.panel.loc[ax])
for ax in self.panel.items})
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel({ax: f(self.panel.loc[:, ax])
for ax in self.panel.major_axis})
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
# on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
with catch_warnings(record=True):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(
lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
with catch_warnings(record=True):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception if both major and major_axis are passed
pytest.raises(Exception, self.panel.reindex,
major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# raise exception if both minor and minor_axis are passed
pytest.raises(Exception, self.panel.reindex,
minor_axis=new_minor,
minor=new_minor)
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
assert result is not self.panel
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(
major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
assert result is self.panel
def test_reindex_axis_style(self):
with catch_warnings(record=True):
panel = Panel(np.random.rand(5, 5, 5))
expected0 = Panel(panel.values).iloc[[0, 1]]
expected1 = Panel(panel.values).iloc[:, [0, 1]]
expected2 = Panel(panel.values).iloc[:, :, [0, 1]]
result = panel.reindex([0, 1], axis=0)
assert_panel_equal(result, expected0)
result = panel.reindex([0, 1], axis=1)
assert_panel_equal(result, expected1)
result = panel.reindex([0, 1], axis=2)
assert_panel_equal(result, expected2)
result = panel.reindex([0, 1], axis=2)
assert_panel_equal(result, expected2)
def test_reindex_multi(self):
with catch_warnings(record=True):
# with and without copy full reindexing
result = self.panel.reindex(
items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert result.items is self.panel.items
assert result.major_axis is self.panel.major_axis
assert result.minor_axis is self.panel.minor_axis
result = self.panel.reindex(
items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
with catch_warnings(record=True):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
with catch_warnings(record=True):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
# neg indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
pytest.raises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
with catch_warnings(record=True):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(
sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
with catch_warnings(record=True):
filled = self.panel.fillna(0)
assert np.isfinite(filled.values).all()
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
pytest.raises(ValueError, self.panel.fillna)
pytest.raises(ValueError, self.panel.fillna, 5, method='ffill')
pytest.raises(TypeError, self.panel.fillna, [1, 2])
pytest.raises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
pytest.raises(NotImplementedError,
lambda: p.fillna(999, limit=1))
# Test in place fillNA
# Expected result
expected = Panel([[[0, 1], [2, 1]], [[10, 11], [12, 11]]],
items=['a', 'b'], minor_axis=['x', 'y'],
dtype=np.float64)
# method='ffill'
p1 = Panel([[[0, 1], [2, np.nan]], [[10, 11], [12, np.nan]]],
items=['a', 'b'], minor_axis=['x', 'y'],
dtype=np.float64)
p1.fillna(method='ffill', inplace=True)
assert_panel_equal(p1, expected)
# method='bfill'
p2 = Panel([[[0, np.nan], [2, 1]], [[10, np.nan], [12, 11]]],
items=['a', 'b'], minor_axis=['x', 'y'],
dtype=np.float64)
p2.fillna(method='bfill', inplace=True)
assert_panel_equal(p2, expected)
def test_ffill_bfill(self):
with catch_warnings(record=True):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
with catch_warnings(record=True):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
with catch_warnings(record=True):
result = self.panel.swapaxes('items', 'minor')
assert result.items is self.panel.minor_axis
result = self.panel.swapaxes('items', 'major')
assert result.items is self.panel.major_axis
result = self.panel.swapaxes('major', 'minor')
assert result.major_axis is self.panel.minor_axis
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
assert result.items is self.panel.major_axis
# this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
assert id(self.panel) != id(result)
def test_transpose(self):
with catch_warnings(record=True):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test mixture of args
result = self.panel.transpose(
'minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor',
'major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assert_raises_regex(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assert_raises_regex(ValueError,
'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
pytest.raises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
with catch_warnings(record=True):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
assert notna(result.values[1, 0, 1])
def test_to_frame(self):
with catch_warnings(record=True):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
assert unfiltered.index.names == ('major', 'minor')
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
assert rdf.index.names == df.index.names
assert rdf.columns.names == df.columns.names
def test_to_frame_mixed(self):
with catch_warnings(record=True):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
assert wp['bool'].values.dtype == np.bool_
# Previously, this was mutating the underlying
# index and changing its name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
with catch_warnings(record=True):
idx = MultiIndex.from_tuples(
[(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1],
[3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples(
[(1, 'two'), (1, 'one'), (2, 'one'), (np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1],
[3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
with catch_warnings(record=True):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12],
[3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'],
['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], [
'y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4],
[-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
with catch_warnings(record=True):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples(
[(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
with catch_warnings(record=True):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
assert isna(panel[0].loc[1, [0, 1]]).all()
def test_to_panel_duplicates(self):
# #2441
with catch_warnings(record=True):
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
tm.assert_raises_regex(
ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
with catch_warnings(record=True):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A', 'B']]
expected.items = ['A', 'A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, ['A', 'B']]
expected.major_axis = ['A', 'A']
result = panel.loc[:, 'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, ['A', 'B']]
expected.minor_axis = ['A', 'A']
result = panel.loc[:, :, 'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
with catch_warnings(record=True):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
with catch_warnings(record=True):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx),
shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx),
shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx], shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel({i: f.shift(-1)[:-1]
for i, f in self.panel.iteritems()})
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame())
for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
with catch_warnings(record=True):
ps = tm.makePeriodPanel()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_panel_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=BDay())
assert_panel_equal(shifted, shifted3)
tm.assert_raises_regex(ValueError, 'does not match',
ps.tshift, freq='M')
# DatetimeIndex
panel = make_test_panel()
shifted = panel.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(panel, unshifted)
shifted2 = panel.tshift(freq=panel.major_axis.freq)
assert_panel_equal(shifted, shifted2)
inferred_ts = Panel(panel.values, items=panel.items,
major_axis=Index(np.asarray(panel.major_axis)),
minor_axis=panel.minor_axis)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(shifted, panel.tshift(1))
assert_panel_equal(unshifted, inferred_ts)
no_freq = panel.iloc[:, [0, 5, 7], :]
pytest.raises(ValueError, no_freq.tshift)
def test_pct_change(self):
with catch_warnings(record=True):
df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
df2 = df1 + 1
df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
# major, 1
result = wp.pct_change() # axis='major'
expected = Panel({'i1': df1.pct_change(),
'i2': df2.pct_change(),
'i3': df3.pct_change()})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=1)
assert_panel_equal(result, expected)
# major, 2
result = wp.pct_change(periods=2)
expected = Panel({'i1': df1.pct_change(2),
'i2': df2.pct_change(2),
'i3': df3.pct_change(2)})
assert_panel_equal(result, expected)
# minor, 1
result = wp.pct_change(axis='minor')
expected = Panel({'i1': df1.pct_change(axis=1),
'i2': df2.pct_change(axis=1),
'i3': df3.pct_change(axis=1)})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=2)
assert_panel_equal(result, expected)
# minor, 2
result = wp.pct_change(periods=2, axis='minor')
expected = Panel({'i1': df1.pct_change(periods=2, axis=1),
'i2': df2.pct_change(periods=2, axis=1),
'i3': df3.pct_change(periods=2, axis=1)})
assert_panel_equal(result, expected)
# items, 1
result = wp.pct_change(axis='items')
expected = Panel(
{'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [1, 0.5, .2],
'c2': [1. / 3, 0.25, 1. / 6]}),
'i3': DataFrame({'c1': [.5, 1. / 3, 1. / 6],
'c2': [.25, .2, 1. / 7]})})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=0)
assert_panel_equal(result, expected)
# items, 2
result = wp.pct_change(periods=2, axis='items')
expected = Panel(
{'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i3': DataFrame({'c1': [2, 1, .4],
'c2': [2. / 3, .5, 1. / 3]})})
assert_panel_equal(result, expected)
def test_round(self):
with catch_warnings(record=True):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
[-1566.213, 88.88], [-12, 94.5]],
[[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
[272.212, -99.99], [23, -76.5]]]
evalues = [[[float(np.around(i)) for i in j] for j in k]
for k in values]
p = Panel(values, items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
expected = Panel(evalues, items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = p.round()
assert_panel_equal(expected, result)
def test_numpy_round(self):
with catch_warnings(record=True):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
[-1566.213, 88.88], [-12, 94.5]],
[[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
[272.212, -99.99], [23, -76.5]]]
evalues = [[[float(np.around(i)) for i in j] for j in k]
for k in values]
p = Panel(values, items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
expected = Panel(evalues, items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = np.round(p)
assert_panel_equal(expected, result)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, np.round, p, out=p)
def test_multiindex_get(self):
with catch_warnings(record=True):
ind = MultiIndex.from_tuples(
[('a', 1), ('a', 2), ('b', 1), ('b', 2)],
names=['first', 'second'])
wp = Panel(np.random.random((4, 5, 5)),
items=ind,
major_axis=np.arange(5),
minor_axis=np.arange(5))
f1 = wp['a']
f2 = wp.loc['a']
assert_panel_equal(f1, f2)
assert (f1.items == [1, 2]).all()
assert (f2.items == [1, 2]).all()
MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
def test_multiindex_blocks(self):
with catch_warnings(record=True):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
wp = Panel(self.panel._data)
wp.items = ind
f1 = wp['a']
assert (f1.items == [1, 2]).all()
f1 = wp[('b', 1)]
assert (f1.columns == ['A', 'B', 'C', 'D']).all()
def test_repr_empty(self):
with catch_warnings(record=True):
empty = Panel()
repr(empty)
def test_rename(self):
with catch_warnings(record=True):
mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'}
renamed = self.panel.rename_axis(mapper, axis=0)
exp = Index(['foo', 'bar', 'baz'])
tm.assert_index_equal(renamed.items, exp)
renamed = self.panel.rename_axis(str.lower, axis=2)
exp = Index(['a', 'b', 'c', 'd'])
tm.assert_index_equal(renamed.minor_axis, exp)
# don't copy
renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False)
renamed_nocopy['foo'] = 3.
assert (self.panel['ItemA'].values == 3).all()
def test_get_attr(self):
assert_frame_equal(self.panel['ItemA'], self.panel.ItemA)
# specific cases from #3440
self.panel['a'] = self.panel['ItemA']
assert_frame_equal(self.panel['a'], self.panel.a)
self.panel['i'] = self.panel['ItemA']
assert_frame_equal(self.panel['i'], self.panel.i)
def test_from_frame_level1_unsorted(self):
with catch_warnings(record=True):
tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1),
('MSFT', 1)]
midx = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.rand(5, 4), index=midx)
p = df.to_panel()
assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
try:
import xlwt # noqa
import xlrd # noqa
import openpyxl # noqa
from pandas.io.excel import ExcelFile
except ImportError:
pytest.skip("need xlwt xlrd openpyxl")
for ext in ['xls', 'xlsx']:
with ensure_clean('__tmp__.' + ext) as path:
self.panel.to_excel(path)
try:
reader = ExcelFile(path)
except ImportError:
pytest.skip("need xlwt xlrd openpyxl")
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_to_excel_xlsxwriter(self):
try:
import xlrd # noqa
import xlsxwriter # noqa
from pandas.io.excel import ExcelFile
except ImportError:
pytest.skip("Requires xlrd and xlsxwriter. Skipping test.")
with ensure_clean('__tmp__.xlsx') as path:
self.panel.to_excel(path, engine='xlsxwriter')
try:
reader = ExcelFile(path)
except ImportError as e:
pytest.skip("cannot write excel file: %s" % e)
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_dropna(self):
with catch_warnings(record=True):
p = Panel(np.random.randn(4, 5, 6), major_axis=list('abcde'))
p.loc[:, ['b', 'd'], 0] = np.nan
result = p.dropna(axis=1)
exp = p.loc[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
inp = p.copy()
inp.dropna(axis=1, inplace=True)
assert_panel_equal(inp, exp)
result = p.dropna(axis=1, how='all')
assert_panel_equal(result, p)
p.loc[:, ['b', 'd'], :] = np.nan
result = p.dropna(axis=1, how='all')
exp = p.loc[:, ['a', 'c', 'e'], :]
assert_panel_equal(result, exp)
p = Panel(np.random.randn(4, 5, 6), items=list('abcd'))
p.loc[['b'], :, 0] = np.nan
result = p.dropna()
exp = p.loc[['a', 'c', 'd']]
assert_panel_equal(result, exp)
result = p.dropna(how='all')
assert_panel_equal(result, p)
p.loc['b'] = np.nan
result = p.dropna(how='all')
exp = p.loc[['a', 'c', 'd']]
assert_panel_equal(result, exp)
def test_drop(self):
with catch_warnings(record=True):
df = DataFrame({"A": [1, 2], "B": [3, 4]})
panel = Panel({"One": df, "Two": df})
def check_drop(drop_val, axis_number, aliases, expected):
try:
actual = panel.drop(drop_val, axis=axis_number)
assert_panel_equal(actual, expected)
for alias in aliases:
actual = panel.drop(drop_val, axis=alias)
assert_panel_equal(actual, expected)
except AssertionError:
pprint_thing("Failed with axis_number %d and aliases: %s" %
(axis_number, aliases))
raise
# Items
expected = Panel({"One": df})
check_drop('Two', 0, ['items'], expected)
pytest.raises(KeyError, panel.drop, 'Three')
# errors = 'ignore'
dropped = panel.drop('Three', errors='ignore')
assert_panel_equal(dropped, panel)
dropped = panel.drop(['Two', 'Three'], errors='ignore')
expected = Panel({"One": df})
assert_panel_equal(dropped, expected)
# Major
exp_df = DataFrame({"A": [2], "B": [4]}, index=[1])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(0, 1, ['major_axis', 'major'], expected)
exp_df = DataFrame({"A": [1], "B": [3]}, index=[0])
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop([1], 1, ['major_axis', 'major'], expected)
# Minor
exp_df = df[['B']]
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop(["A"], 2, ['minor_axis', 'minor'], expected)
exp_df = df[['A']]
expected = Panel({"One": exp_df, "Two": exp_df})
check_drop("B", 2, ['minor_axis', 'minor'], expected)
def test_update(self):
with catch_warnings(record=True):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel(
[[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other)
expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[3.6, 2., 3], [1.5, np.nan, 7],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_from_dict(self):
with catch_warnings(record=True):
pan = Panel({'one': DataFrame([[1.5, np.nan, 3],
[1.5, np.nan, 3],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]),
'two': DataFrame([[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]])})
other = {'two': DataFrame(
[[3.6, 2., np.nan], [np.nan, np.nan, 7]])}
pan.update(other)
expected = Panel(
{'one': DataFrame([[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]),
'two': DataFrame([[3.6, 2., 3],
[1.5, np.nan, 7],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]])
}
)
assert_panel_equal(pan, expected)
def test_update_nooverwrite(self):
with catch_warnings(record=True):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel(
[[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other, overwrite=False)
expected = Panel([[[1.5, np.nan, 3], [1.5, np.nan, 3],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[1.5, 2., 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_filtered(self):
with catch_warnings(record=True):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
other = Panel(
[[[3.6, 2., np.nan], [np.nan, np.nan, 7]]], items=[1])
pan.update(other, filter_func=lambda x: x > 2)
expected = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]],
[[1.5, np.nan, 3], [1.5, np.nan, 7],
[1.5, np.nan, 3.], [1.5, np.nan, 3.]]])
assert_panel_equal(pan, expected)
def test_update_raise(self):
with catch_warnings(record=True):
pan = Panel([[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]],
[[1.5, np.nan, 3.], [1.5, np.nan, 3.],
[1.5, np.nan, 3.],
[1.5, np.nan, 3.]]])
pytest.raises(Exception, pan.update, *(pan, ),
**{'raise_conflict': True})
def test_all_any(self):
assert (self.panel.all(axis=0).values == nanall(
self.panel, axis=0)).all()
assert (self.panel.all(axis=1).values == nanall(
self.panel, axis=1).T).all()
assert (self.panel.all(axis=2).values == nanall(
self.panel, axis=2).T).all()
assert (self.panel.any(axis=0).values == nanany(
self.panel, axis=0)).all()
assert (self.panel.any(axis=1).values == nanany(
self.panel, axis=1).T).all()
assert (self.panel.any(axis=2).values == nanany(
self.panel, axis=2).T).all()
def test_all_any_unhandled(self):
pytest.raises(NotImplementedError, self.panel.all, bool_only=True)
pytest.raises(NotImplementedError, self.panel.any, bool_only=True)
# GH issue 15960
def test_sort_values(self):
pytest.raises(NotImplementedError, self.panel.sort_values)
pytest.raises(NotImplementedError, self.panel.sort_values, 'ItemA')
class TestPanelFrame(object):
"""
Check that conversions to and from Panel to DataFrame work.
"""
def setup_method(self, method):
panel = make_test_panel()
self.panel = panel.to_frame()
self.unfiltered_panel = panel.to_frame(filter_observations=False)
def test_ops_differently_indexed(self):
with catch_warnings(record=True):
# trying to set non-identically indexed panel
wp = self.panel.to_panel()
wp2 = wp.reindex(major=wp.major_axis[:-1])
lp2 = wp2.to_frame()
result = self.panel + lp2
assert_frame_equal(result.reindex(lp2.index), lp2 * 2)
# careful, mutation
self.panel['foo'] = lp2['ItemA']
assert_series_equal(self.panel['foo'].reindex(lp2.index),
lp2['ItemA'],
check_names=False)
def test_ops_scalar(self):
with catch_warnings(record=True):
result = self.panel.mul(2)
expected = DataFrame.__mul__(self.panel, 2)
assert_frame_equal(result, expected)
def test_combineFrame(self):
with catch_warnings(record=True):
wp = self.panel.to_panel()
result = self.panel.add(wp['ItemA'].stack(), axis=0)
assert_frame_equal(result.to_panel()['ItemA'], wp['ItemA'] * 2)
def test_combinePanel(self):
with catch_warnings(record=True):
wp = self.panel.to_panel()
result = self.panel.add(self.panel)
wide_result = result.to_panel()
assert_frame_equal(wp['ItemA'] * 2, wide_result['ItemA'])
# one item
result = self.panel.add(self.panel.filter(['ItemA']))
def test_combine_scalar(self):
with catch_warnings(record=True):
result = self.panel.mul(2)
expected = DataFrame(self.panel._data) * 2
assert_frame_equal(result, expected)
def test_combine_series(self):
with catch_warnings(record=True):
s = self.panel['ItemA'][:10]
result = self.panel.add(s, axis=0)
expected = DataFrame.add(self.panel, s, axis=0)
assert_frame_equal(result, expected)
s = self.panel.iloc[5]
result = self.panel + s
expected = DataFrame.add(self.panel, s, axis=1)
assert_frame_equal(result, expected)
def test_operators(self):
with catch_warnings(record=True):
wp = self.panel.to_panel()
result = (self.panel + 1).to_panel()
assert_frame_equal(wp['ItemA'] + 1, result['ItemA'])
def test_arith_flex_panel(self):
with catch_warnings(record=True):
ops = ['add', 'sub', 'mul', 'div',
'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
aliases = {}
else:
aliases = {'div': 'truediv'}
self.panel = self.panel.to_panel()
for n in [np.random.randint(-50, -1), np.random.randint(1, 50), 0]:
for op in ops:
alias = aliases.get(op, op)
f = getattr(operator, alias)
exp = f(self.panel, n)
result = getattr(self.panel, op)(n)
assert_panel_equal(result, exp, check_panel_type=True)
# rops
r_f = lambda x, y: f(y, x)
exp = r_f(self.panel, n)
result = getattr(self.panel, 'r' + op)(n)
assert_panel_equal(result, exp)
def test_sort(self):
def is_sorted(arr):
            return (arr[1:] >= arr[:-1]).all()  # non-decreasing check
sorted_minor = self.panel.sort_index(level=1)
assert is_sorted(sorted_minor.index.labels[1])
sorted_major = sorted_minor.sort_index(level=0)
assert is_sorted(sorted_major.index.labels[0])
def test_to_string(self):
buf = StringIO()
self.panel.to_string(buf)
def test_to_sparse(self):
if isinstance(self.panel, Panel):
msg = 'sparsifying is not supported'
tm.assert_raises_regex(NotImplementedError, msg,
self.panel.to_sparse)
def test_truncate(self):
with catch_warnings(record=True):
dates = self.panel.index.levels[0]
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(start, end)
# TODO truncate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
trunced = self.panel.truncate(before=start).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(before=start)
# TODO truncate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
trunced = self.panel.truncate(after=end).to_panel()
expected = self.panel.to_panel()['ItemA'].truncate(after=end)
# TODO truncate drops index.names
assert_frame_equal(trunced['ItemA'], expected, check_names=False)
# truncate on dates that aren't in there
wp = self.panel.to_panel()
new_index = wp.major_axis[::5]
wp2 = wp.reindex(major=new_index)
lp2 = wp2.to_frame()
lp_trunc = lp2.truncate(wp.major_axis[2], wp.major_axis[-2])
wp_trunc = wp2.truncate(wp.major_axis[2], wp.major_axis[-2])
assert_panel_equal(wp_trunc, lp_trunc.to_panel())
# throw proper exception
pytest.raises(Exception, lp2.truncate, wp.major_axis[-2],
wp.major_axis[2])
def test_axis_dummies(self):
from pandas.core.reshape.reshape import make_axis_dummies
minor_dummies = make_axis_dummies(self.panel, 'minor').astype(np.uint8)
assert len(minor_dummies.columns) == len(self.panel.index.levels[1])
major_dummies = make_axis_dummies(self.panel, 'major').astype(np.uint8)
assert len(major_dummies.columns) == len(self.panel.index.levels[0])
mapping = {'A': 'one', 'B': 'one', 'C': 'two', 'D': 'two'}
transformed = make_axis_dummies(self.panel, 'minor',
transform=mapping.get).astype(np.uint8)
assert len(transformed.columns) == 2
tm.assert_index_equal(transformed.columns, Index(['one', 'two']))
# TODO: test correctness
def test_get_dummies(self):
from pandas.core.reshape.reshape import get_dummies, make_axis_dummies
self.panel['Label'] = self.panel.index.labels[1]
minor_dummies = make_axis_dummies(self.panel, 'minor').astype(np.uint8)
dummies = get_dummies(self.panel['Label'])
tm.assert_numpy_array_equal(dummies.values, minor_dummies.values)
def test_mean(self):
with catch_warnings(record=True):
means = self.panel.mean(level='minor')
# test versus Panel version
wide_means = self.panel.to_panel().mean('major')
assert_frame_equal(means, wide_means)
def test_sum(self):
with catch_warnings(record=True):
sums = self.panel.sum(level='minor')
# test versus Panel version
wide_sums = self.panel.to_panel().sum('major')
assert_frame_equal(sums, wide_sums)
def test_count(self):
with catch_warnings(record=True):
index = self.panel.index
major_count = self.panel.count(level=0)['ItemA']
labels = index.labels[0]
for i, idx in enumerate(index.levels[0]):
assert major_count[i] == (labels == i).sum()
minor_count = self.panel.count(level=1)['ItemA']
labels = index.labels[1]
for i, idx in enumerate(index.levels[1]):
assert minor_count[i] == (labels == i).sum()
def test_join(self):
with catch_warnings(record=True):
lp1 = self.panel.filter(['ItemA', 'ItemB'])
lp2 = self.panel.filter(['ItemC'])
joined = lp1.join(lp2)
assert len(joined.columns) == 3
pytest.raises(Exception, lp1.join,
self.panel.filter(['ItemB', 'ItemC']))
def test_pivot(self):
with catch_warnings(record=True):
from pandas.core.reshape.reshape import _slow_pivot
one, two, three = (np.array([1, 2, 3, 4, 5]),
np.array(['a', 'b', 'c', 'd', 'e']),
np.array([1, 2, 3, 5, 4.]))
df = pivot(one, two, three)
assert df['a'][1] == 1
assert df['b'][2] == 2
assert df['c'][3] == 3
assert df['d'][4] == 5
assert df['e'][5] == 4
assert_frame_equal(df, _slow_pivot(one, two, three))
# weird overlap, TODO: test?
a, b, c = (np.array([1, 2, 3, 4, 4]),
np.array(['a', 'a', 'a', 'a', 'a']),
np.array([1., 2., 3., 4., 5.]))
pytest.raises(Exception, pivot, a, b, c)
# corner case, empty
df = pivot(np.array([]), np.array([]), np.array([]))
def test_panel_index():
index = panelm.panel_index([1, 2, 3, 4], [1, 2, 3])
expected = MultiIndex.from_arrays([np.tile([1, 2, 3, 4], 3),
np.repeat([1, 2, 3], 4)],
names=['time', 'panel'])
tm.assert_index_equal(index, expected)
| bsd-3-clause |
jskDr/jamespy_py3 | kcell_r1.py | 1 | 2473 | # kcell.py
# python3
import pandas as pd
from sklearn import cross_validation, svm, metrics, cluster, tree, ensemble
import kkeras
def clst( X_train, y_train, X_test, y_test, nb_classes):
model = tree.DecisionTreeClassifier()
model.fit( X_train, y_train)
dt_score = model.score( X_test, y_test)
print( "DT-C:", dt_score)
model = svm.SVC( kernel = 'linear')
model.fit( X_train, y_train)
sv_score = model.score( X_test, y_test)
print( "SVC:", sv_score)
model = kkeras.MLPC( [X_train.shape[1], 30, 10, nb_classes])
model.fit( X_train, y_train, X_test, y_test, nb_classes)
mlp_score = model.score( X_test, y_test)
print( "DNN:", mlp_score)
model = ensemble.RandomForestClassifier( n_estimators=10)
model.fit( X_train, y_train)
rf_score = model.score( X_test, y_test)
print( "RF:", rf_score)
return dt_score, sv_score, mlp_score, rf_score
def GET_clsf2_by_clst( nb_classes):
def clsf2_by_clst( Xpart_cf, Xpart_ct):
"""
        Clustering is performed first, and classification is then performed using the cluster indices as labels.
"""
cl_model = cluster.KMeans(n_clusters=nb_classes)
cl_model.fit(Xpart_ct)
yint = cl_model.predict( Xpart_ct)
X_train, X_test, y_train, y_test = cross_validation.train_test_split( Xpart_cf, yint, test_size = 0.2)
return clst(X_train, y_train, X_test, y_test, nb_classes)
return clsf2_by_clst
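# A minimal usage sketch of the closure above (the array names are
# hypothetical, not part of this module): Xpart_ct is clustered with KMeans
# to obtain integer labels, and Xpart_cf is then classified against them.
#
#   clsf2 = GET_clsf2_by_clst(nb_classes=3)
#   dt_score, sv_score, mlp_score, rf_score = clsf2(Xpart_cf, Xpart_ct)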
def GET_clsf2_by_yint( nb_classes):
def clsf2_by_yint( X1part, yint):
"""
        Classification is performed using the given integer labels (yint).
"""
X_train, X_test, y_train, y_test = cross_validation.train_test_split( X1part, yint, test_size = 0.2)
return clst(X_train, y_train, X_test, y_test, nb_classes)
return clsf2_by_yint
def pd_df( ix, s_l, ms):
VI = {1:"Velocity", 2:"Intensity", 12:"Combined"}
ln = len( s_l)
df_i = pd.DataFrame()
df_i["Type"] = ["{}: ".format(ms) + str( ix)] * ln
df_i["Clustering"] = [ VI[ix[0]]] * ln
df_i["Classification"] = [ VI[ix[1]]] * ln
df_i["Clustering method"] = [ "KMeans"] * ln
df_i["Classification method"] = [ "DT", "SVC", "DNN", "RF"]
df_i["Pc"] = s_l
return df_i
def pd_clsf2_by_clst( ix, Xpart_ct, Xpart_cf, nb_classes):
print( "Type", ix, "- Clustering:", ix[1], "Classification:", ix[0])
s_l = GET_clsf2_by_clst(nb_classes)(Xpart_cf, Xpart_ct)
return pd_df( ix, s_l, "KMeans")
def pd_clsf2_by_yint( ix, yint, Xpart_cf, nb_classes):
print( "Type", ix, "- Clustering:", ix[1], "Classification:", ix[0])
s_l = GET_clsf2_by_yint(nb_classes)(Xpart_cf, yint)
return pd_df( ix, s_l, "Science")
| mit |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_chroma/dnn6.py | 7 | 37780 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# DCASE 2016::Acoustic Scene Classification / Baseline System
import argparse
import textwrap
import timeit
import skflow
from sklearn import mixture
from sklearn import preprocessing as pp
from sklearn.externals import joblib
from sklearn.metrics import confusion_matrix
from src.dataset import *
from src.evaluation import *
from src.features import *
__version_info__ = ('1', '0', '0')
__version__ = '.'.join(__version_info__)
final_result = {}
train_start = 0.0
train_end = 0.0
test_start = 0.0
test_end = 0.0
def main(argv):
numpy.random.seed(123456) # let's make randomization predictable
tot_start = timeit.default_timer()
parser = argparse.ArgumentParser(
prefix_chars='-+',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
DCASE 2016
Task 1: Acoustic Scene Classification
Baseline system
---------------------------------------------
Tampere University of Technology / Audio Research Group
Author: Toni Heittola ( [email protected] )
System description
                This is a baseline implementation for the D-CASE 2016 challenge acoustic scene classification task.
Features: MFCC (static+delta+acceleration)
Classifier: GMM
'''))
# Setup argument handling
parser.add_argument("-development", help="Use the system in the development mode", action='store_true',
default=False, dest='development')
parser.add_argument("-challenge", help="Use the system in the challenge mode", action='store_true',
default=False, dest='challenge')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
# Load parameters from config file
parameter_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.splitext(os.path.basename(__file__))[0] + '.yaml')
params = load_parameters(parameter_file)
params = process_parameters(params)
make_folders(params)
title("DCASE 2016::Acoustic Scene Classification / Baseline System")
# Check if mode is defined
if not (args.development or args.challenge):
args.development = True
args.challenge = False
dataset_evaluation_mode = 'folds'
if args.development and not args.challenge:
print "Running system in development mode"
dataset_evaluation_mode = 'folds'
elif not args.development and args.challenge:
print "Running system in challenge mode"
dataset_evaluation_mode = 'full'
# Get dataset container class
dataset = eval(params['general']['development_dataset'])(data_path=params['path']['data'])
# Fetch data over internet and setup the data
# ==================================================
if params['flow']['initialize']:
dataset.fetch()
# Extract features for all audio files in the dataset
# ==================================================
if params['flow']['extract_features']:
section_header('Feature extraction')
# Collect files in train sets
files = []
for fold in dataset.folds(mode=dataset_evaluation_mode):
for item_id, item in enumerate(dataset.train(fold)):
if item['file'] not in files:
files.append(item['file'])
for item_id, item in enumerate(dataset.test(fold)):
if item['file'] not in files:
files.append(item['file'])
files = sorted(files)
# Go through files and make sure all features are extracted
do_feature_extraction(files=files,
dataset=dataset,
feature_path=params['path']['features'],
params=params['features'],
overwrite=params['general']['overwrite'])
foot()
# Prepare feature normalizers
# ==================================================
if params['flow']['feature_normalizer']:
section_header('Feature normalizer')
do_feature_normalization(dataset=dataset,
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite'])
foot()
# System training
# ==================================================
if params['flow']['train_system']:
section_header('System training')
train_start = timeit.default_timer()
do_system_training(dataset=dataset,
model_path=params['path']['models'],
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
classifier_params=params['classifier']['parameters'],
classifier_method=params['classifier']['method'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite']
)
train_end = timeit.default_timer()
foot()
# System evaluation in development mode
if args.development and not args.challenge:
# System testing
# ==================================================
if params['flow']['test_system']:
section_header('System testing')
test_start = timeit.default_timer()
do_system_testing(dataset=dataset,
feature_path=params['path']['features'],
result_path=params['path']['results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=params['general']['overwrite']
)
test_end = timeit.default_timer()
foot()
# System evaluation
# ==================================================
if params['flow']['evaluate_system']:
section_header('System evaluation')
do_system_evaluation(dataset=dataset,
dataset_evaluation_mode=dataset_evaluation_mode,
result_path=params['path']['results'])
foot()
# System evaluation with challenge data
elif not args.development and args.challenge:
# Fetch data over internet and setup the data
challenge_dataset = eval(params['general']['challenge_dataset'])()
if params['flow']['initialize']:
challenge_dataset.fetch()
# System testing
if params['flow']['test_system']:
section_header('System testing with challenge data')
do_system_testing(dataset=challenge_dataset,
feature_path=params['path']['features'],
result_path=params['path']['challenge_results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=True
)
foot()
print " "
print "Your results for the challenge data are stored at [" + params['path']['challenge_results'] + "]"
print " "
tot_end = timeit.default_timer()
print " "
print "Train Time : " + str(train_end - train_start)
print " "
print " "
print "Test Time : " + str(test_end - test_start)
print " "
print " "
print "Total Time : " + str(tot_end - tot_start)
print " "
final_result['train_time'] = train_end - train_start
final_result['test_time'] = test_end - test_start
final_result['tot_time'] = tot_end - tot_start
joblib.dump(final_result, 'result.pkl')
return 0
def process_parameters(params):
"""Parameter post-processing.
Parameters
----------
params : dict
parameters in dict
Returns
-------
params : dict
processed parameters
"""
# Convert feature extraction window and hop sizes seconds to samples
params['features']['mfcc']['win_length'] = int(params['features']['win_length_seconds'] * params['features']['fs'])
params['features']['mfcc']['hop_length'] = int(params['features']['hop_length_seconds'] * params['features']['fs'])
# Copy parameters for current classifier method
params['classifier']['parameters'] = params['classifier_parameters'][params['classifier']['method']]
# Hash
params['features']['hash'] = get_parameter_hash(params['features'])
params['classifier']['hash'] = get_parameter_hash(params['classifier'])
# Paths
params['path']['data'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['data'])
params['path']['base'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['base'])
# Features
params['path']['features_'] = params['path']['features']
params['path']['features'] = os.path.join(params['path']['base'],
params['path']['features'],
params['features']['hash'])
# Feature normalizers
params['path']['feature_normalizers_'] = params['path']['feature_normalizers']
params['path']['feature_normalizers'] = os.path.join(params['path']['base'],
params['path']['feature_normalizers'],
params['features']['hash'])
# Models
params['path']['models_'] = params['path']['models']
params['path']['models'] = os.path.join(params['path']['base'],
params['path']['models'],
params['features']['hash'], params['classifier']['hash'])
# Results
params['path']['results_'] = params['path']['results']
params['path']['results'] = os.path.join(params['path']['base'],
params['path']['results'],
params['features']['hash'], params['classifier']['hash'])
return params
def make_folders(params, parameter_filename='parameters.yaml'):
"""Create all needed folders, and saves parameters in yaml-file for easier manual browsing of data.
Parameters
----------
params : dict
parameters in dict
parameter_filename : str
filename to save parameters used to generate the folder name
Returns
-------
nothing
"""
# Check that target path exists, create if not
check_path(params['path']['features'])
check_path(params['path']['feature_normalizers'])
check_path(params['path']['models'])
check_path(params['path']['results'])
# Save parameters into folders to help manual browsing of files.
# Features
feature_parameter_filename = os.path.join(params['path']['features'], parameter_filename)
if not os.path.isfile(feature_parameter_filename):
save_parameters(feature_parameter_filename, params['features'])
# Feature normalizers
feature_normalizer_parameter_filename = os.path.join(params['path']['feature_normalizers'], parameter_filename)
if not os.path.isfile(feature_normalizer_parameter_filename):
save_parameters(feature_normalizer_parameter_filename, params['features'])
# Models
model_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(model_features_parameter_filename):
save_parameters(model_features_parameter_filename, params['features'])
model_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(model_models_parameter_filename):
save_parameters(model_models_parameter_filename, params['classifier'])
# Results
# Save parameters into folders to help manual browsing of files.
result_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(result_features_parameter_filename):
save_parameters(result_features_parameter_filename, params['features'])
result_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(result_models_parameter_filename):
save_parameters(result_models_parameter_filename, params['classifier'])
def get_feature_filename(audio_file, path, extension='cpickle'):
"""Get feature filename
Parameters
----------
audio_file : str
audio file name from which the features are extracted
path : str
feature path
extension : str
file extension
(Default value='cpickle')
Returns
-------
feature_filename : str
full feature filename
"""
audio_filename = os.path.split(audio_file)[1]
return os.path.join(path, os.path.splitext(audio_filename)[0] + '.' + extension)
def get_feature_normalizer_filename(fold, path, extension='cpickle'):
"""Get normalizer filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
normalizer path
extension : str
file extension
(Default value='cpickle')
Returns
-------
normalizer_filename : str
full normalizer filename
"""
return os.path.join(path, 'scale_fold' + str(fold) + '.' + extension)
def get_model_filename(fold, path, extension='cpickle'):
"""Get model filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
model path
extension : str
file extension
(Default value='cpickle')
Returns
-------
model_filename : str
full model filename
"""
return os.path.join(path, 'model_fold' + str(fold) + '.' + extension)
def get_result_filename(fold, path, extension='txt'):
"""Get result filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
result path
extension : str
file extension
        (Default value='txt')
Returns
-------
result_filename : str
full result filename
"""
if fold == 0:
return os.path.join(path, 'results.' + extension)
else:
return os.path.join(path, 'results_fold' + str(fold) + '.' + extension)
def do_feature_extraction(files, dataset, feature_path, params, overwrite=False):
"""Feature extraction
Parameters
----------
files : list
file list
dataset : class
dataset class
feature_path : str
path where the features are saved
params : dict
parameter dict
overwrite : bool
overwrite existing feature files
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Audio file not found.
"""
# Check that target path exists, create if not
check_path(feature_path)
for file_id, audio_filename in enumerate(files):
# Get feature filename
current_feature_file = get_feature_filename(audio_file=os.path.split(audio_filename)[1], path=feature_path)
progress(title_text='Extracting',
percentage=(float(file_id) / len(files)),
note=os.path.split(audio_filename)[1])
if not os.path.isfile(current_feature_file) or overwrite:
# Load audio data
if os.path.isfile(dataset.relative_to_absolute_path(audio_filename)):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(audio_filename), mono=True,
fs=params['fs'])
else:
raise IOError("Audio file not found [%s]" % audio_filename)
# Extract features
if params['method'] == 'lfcc':
feature_file_txt = get_feature_filename(audio_file=os.path.split(audio_filename)[1],
path=feature_path,
extension='txt')
feature_data = feature_extraction_lfcc(feature_file_txt)
else:
# feature_data['feat'].shape is (1501, 60)
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=params['include_mfcc0'],
include_delta=params['include_delta'],
include_acceleration=params['include_acceleration'],
mfcc_params=params['mfcc'],
delta_params=params['mfcc_delta'],
acceleration_params=params['mfcc_acceleration'])
# Save
save_data(current_feature_file, feature_data)
def do_feature_normalization(dataset, feature_normalizer_path, feature_path, dataset_evaluation_mode='folds',
overwrite=False):
"""Feature normalization
    Calculates normalization factors for each evaluation fold based on the training material available.
Parameters
----------
dataset : class
dataset class
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
overwrite : bool
overwrite existing normalizers
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Feature file not found.
"""
# Check that target path exists, create if not
check_path(feature_normalizer_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_normalizer_file = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if not os.path.isfile(current_normalizer_file) or overwrite:
# Initialize statistics
file_count = len(dataset.train(fold))
normalizer = FeatureNormalizer()
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
if os.path.isfile(get_feature_filename(audio_file=item['file'], path=feature_path)):
feature_data = load_data(get_feature_filename(audio_file=item['file'], path=feature_path))['stat']
else:
raise IOError("Feature file not found [%s]" % (item['file']))
# Accumulate statistics
normalizer.accumulate(feature_data)
# Calculate normalization factors
normalizer.finalize()
# Save
save_data(current_normalizer_file, normalizer)
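# In brief, the normalizer life cycle used above (method names as used in
# this file; FeatureNormalizer is provided by src.features):
#   normalizer = FeatureNormalizer()
#   normalizer.accumulate(per_file_feature_statistics)  # once per training file
#   normalizer.finalize()                               # derive the scaling factors
#   normalized = normalizer.normalize(feature_matrix)   # applied at train and test time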
def do_system_training(dataset, model_path, feature_normalizer_path, feature_path, classifier_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System training
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
dataset : class
dataset class
model_path : str
path where the models are saved.
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
classifier_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
    classifier_method : str ['gmm', 'dnn']
        classifier method; 'gmm' and 'dnn' are supported
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Feature normalizer not found.
Feature file not found.
"""
if classifier_method != 'gmm' and classifier_method != 'dnn':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Check that target path exists, create if not
check_path(model_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_model_file = get_model_filename(fold=fold, path=model_path)
if not os.path.isfile(current_model_file) or overwrite:
# Load normalizer
feature_normalizer_filename = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if os.path.isfile(feature_normalizer_filename):
normalizer = load_data(feature_normalizer_filename)
else:
raise IOError("Feature normalizer not found [%s]" % feature_normalizer_filename)
# Initialize model container
model_container = {'normalizer': normalizer, 'models': {}}
# Collect training examples
file_count = len(dataset.train(fold))
data = {}
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
raise IOError("Features not found [%s]" % (item['file']))
# Scale features
feature_data = model_container['normalizer'].normalize(feature_data)
# Store features per class label
if item['scene_label'] not in data:
data[item['scene_label']] = feature_data
else:
data[item['scene_label']] = numpy.vstack((data[item['scene_label']], feature_data))
le = pp.LabelEncoder()
tot_data = {}
# Train models for each class
for label in data:
progress(title_text='Train models',
fold=fold,
note=label)
if classifier_method == 'gmm':
model_container['models'][label] = mixture.GMM(**classifier_params).fit(data[label])
elif classifier_method == 'dnn':
if 'x' not in tot_data:
tot_data['x'] = data[label]
tot_data['y'] = numpy.repeat(label, len(data[label]), axis=0)
else:
tot_data['x'] = numpy.vstack((tot_data['x'], data[label]))
tot_data['y'] = numpy.hstack((tot_data['y'], numpy.repeat(label, len(data[label]), axis=0)))
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
clf = skflow.TensorFlowDNNClassifier(**classifier_params)
if classifier_method == 'dnn':
tot_data['y'] = le.fit_transform(tot_data['y'])
clf.fit(tot_data['x'], tot_data['y'])
clf.save('dnn/dnnmodel1')
# Save models
save_data(current_model_file, model_container)
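# A minimal, hypothetical illustration of the saved model container described
# in the docstring of do_system_training; kept as an unused helper so the
# layout is also visible as code ('office'/'home' are example scene labels).
def _example_model_container(normalizer, gmm_office, gmm_home):
    return {'normalizer': normalizer,
            'models': {'office': gmm_office,
                       'home': gmm_home}}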
def do_system_testing(dataset, result_path, feature_path, model_path, feature_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System testing.
    If extracted features are not found on disk, they are extracted but not saved.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
feature_path : str
path where the features are saved.
model_path : str
path where the models are saved.
feature_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
    classifier_method : str ['gmm', 'dnn']
        classifier method; 'gmm' and 'dnn' are supported
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Model file not found.
Audio file not found.
"""
if classifier_method != 'gmm' and classifier_method != 'dnn':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Check that target path exists, create if not
check_path(result_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_result_file = get_result_filename(fold=fold, path=result_path)
if not os.path.isfile(current_result_file) or overwrite:
results = []
# Load class model container
model_filename = get_model_filename(fold=fold, path=model_path)
if os.path.isfile(model_filename):
model_container = load_data(model_filename)
else:
raise IOError("Model file not found [%s]" % model_filename)
file_count = len(dataset.test(fold))
for file_id, item in enumerate(dataset.test(fold)):
progress(title_text='Testing',
fold=fold,
percentage=(float(file_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
# Load audio
if os.path.isfile(dataset.relative_to_absolute_path(item['file'])):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(item['file']), mono=True,
fs=feature_params['fs'])
else:
raise IOError("Audio file not found [%s]" % (item['file']))
if feature_params['method'] == 'lfcc':
feature_file_txt = get_feature_filename(audio_file=os.path.split(item['file'])[1],
path=feature_path,
extension='txt')
feature_data = feature_extraction_lfcc(feature_file_txt)
else:
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=feature_params['include_mfcc0'],
include_delta=feature_params['include_delta'],
include_acceleration=feature_params['include_acceleration'],
mfcc_params=feature_params['mfcc'],
delta_params=feature_params['mfcc_delta'],
acceleration_params=feature_params['mfcc_acceleration'],
statistics=False)['feat']
# Normalize features
feature_data = model_container['normalizer'].normalize(feature_data)
# Do classification for the block
if classifier_method == 'gmm':
current_result = do_classification_gmm(feature_data, model_container)
current_class = current_result['class']
elif classifier_method == 'dnn':
current_result = do_classification_dnn(feature_data, model_container)
current_class = dataset.scene_labels[current_result['class_id']]
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Store the result
if classifier_method == 'gmm':
results.append((dataset.absolute_to_relative(item['file']),
current_class))
elif classifier_method == 'dnn':
logs_in_tuple = tuple(lo for lo in current_result['logls'])
results.append((dataset.absolute_to_relative(item['file']),
current_class) + logs_in_tuple)
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Save testing results
with open(current_result_file, 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
writer.writerow(result_item)
def do_classification_dnn(feature_data, model_container):
# Initialize log-likelihood matrix to -inf
logls = numpy.empty(15)
logls.fill(-numpy.inf)
model_clf = skflow.TensorFlowEstimator.restore('dnn/dnnmodel1')
logls = numpy.sum(numpy.log(model_clf.predict_proba(feature_data)), 0)
classification_result_id = numpy.argmax(logls)
return {'class_id': classification_result_id,
'logls': logls}
def do_classification_gmm(feature_data, model_container):
"""GMM classification for give feature matrix
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
feature_data : numpy.ndarray [shape=(t, feature vector length)]
feature matrix
model_container : dict
model container
Returns
-------
result : str
classification result as scene label
"""
# Initialize log-likelihood matrix to -inf
logls = numpy.empty(len(model_container['models']))
logls.fill(-numpy.inf)
for label_id, label in enumerate(model_container['models']):
logls[label_id] = numpy.sum(model_container['models'][label].score(feature_data))
classification_result_id = numpy.argmax(logls)
return {'class': model_container['models'].keys()[classification_result_id],
'logls': logls}
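# A small, self-contained sketch (illustration only, never called) of the
# decision rule implemented above: frame-wise log-likelihoods are summed per
# class and the class with the largest total wins. The toy numbers are
# hypothetical and unrelated to the DCASE data.
def _demo_gmm_decision_rule():
    frame_logls = numpy.array([[-1.0, -2.0],   # frame 1: log-likelihood per class
                               [-1.5, -0.5]])  # frame 2
    per_class_total = frame_logls.sum(axis=0)  # one summed log-likelihood per class
    return numpy.argmax(per_class_total)       # index of the winning class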
def do_system_evaluation(dataset, result_path, dataset_evaluation_mode='folds'):
"""System evaluation. Testing outputs are collected and evaluated. Evaluation results are printed.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
Returns
-------
nothing
Raises
-------
IOError
Result file not found
"""
dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results_fold = []
tot_cm = numpy.zeros((dataset.scene_label_count, dataset.scene_label_count))
for fold in dataset.folds(mode=dataset_evaluation_mode):
dcase2016_scene_metric_fold = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results = []
result_filename = get_result_filename(fold=fold, path=result_path)
if os.path.isfile(result_filename):
with open(result_filename, 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
results.append(row)
else:
raise IOError("Result file not found [%s]" % result_filename)
# Rewrite the result file
if os.path.isfile(result_filename):
with open(result_filename+'2', 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
y_true = (dataset.file_meta(result_item[0])[0]['scene_label'],)
print type(y_true)
print type(result_item)
writer.writerow(y_true + tuple(result_item))
y_true = []
y_pred = []
for result in results:
y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
y_pred.append(result[1])
dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
dcase2016_scene_metric_fold.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
results_fold.append(dcase2016_scene_metric_fold.results())
tot_cm += confusion_matrix(y_true, y_pred)
final_result['tot_cm'] = tot_cm
final_result['tot_cm_acc'] = numpy.sum(numpy.diag(tot_cm)) / numpy.sum(tot_cm)
results = dcase2016_scene_metric.results()
final_result['result'] = results
print " File-wise evaluation, over %d folds" % dataset.fold_count
fold_labels = ''
separator = ' =====================+======+======+==========+ +'
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_labels += " {:8s} |".format('Fold' + str(fold))
separator += "==========+"
print " {:20s} | {:4s} : {:4s} | {:8s} | |".format('Scene label', 'Nref', 'Nsys', 'Accuracy') + fold_labels
print separator
for label_id, label in enumerate(sorted(results['class_wise_accuracy'])):
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold - 1]['class_wise_accuracy'][label] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format(label,
results['class_wise_data'][label]['Nref'],
results['class_wise_data'][label]['Nsys'],
results['class_wise_accuracy'][
label] * 100) + fold_values
print separator
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold - 1]['overall_accuracy'] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format('Overall accuracy',
results['Nref'],
results['Nsys'],
results['overall_accuracy'] * 100) + fold_values
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
| mit |
eramirem/astroML | book_figures/chapter8/fig_rbf_ridge_mu_z.py | 3 | 4774 | """
Regularized Regression Example
------------------------------
Figure 8.4
Regularized regression for the same sample as Fig. 8.2. Here we use Gaussian
basis function regression with a Gaussian of width sigma = 0.2 centered at 100
regular intervals between 0 < z < 2. The lower panels show the best-fit weights
as a function of basis function position. The left column shows the results
with no regularization: the basis function weights w are on the order of 10^8,
and overfitting is evident. The middle column shows ridge regression (L2
regularization) with alpha = 0.005, and the right column shows LASSO regression
(L1 regularization) with alpha = 0.005. All three methods are fit without the
bias term (intercept).
Changes from Published Version
++++++++++++++++++++++++++++++
Note that this figure has been changed slightly from its published version:
the original version of the figure did not take into account data errors. The
update (as of astroML version 0.3) correctly takes into account data errors.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import lognorm
from astroML.linear_model import LinearRegression
from astroML.cosmology import Cosmology
from astroML.datasets import generate_mu_z
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#----------------------------------------------------------------------
# generate data
np.random.seed(0)
z_sample, mu_sample, dmu = generate_mu_z(100, random_state=0)
cosmo = Cosmology()
z = np.linspace(0.01, 2, 1000)
mu = np.asarray([cosmo.mu(zi) for zi in z])
#------------------------------------------------------------
# Manually convert data to a gaussian basis
# note: the data errors dmu are taken into account in the fits further below.
def gaussian_basis(x, mu, sigma):
return np.exp(-0.5 * ((x - mu) / sigma) ** 2)
centers = np.linspace(0, 1.8, 100)
widths = 0.2
X = gaussian_basis(z_sample[:, np.newaxis], centers, widths)
#------------------------------------------------------------
# Set up the figure to plot the results
fig = plt.figure(figsize=(5, 2.7))
fig.subplots_adjust(left=0.1, right=0.95,
bottom=0.1, top=0.95,
hspace=0.15, wspace=0.2)
regularization = ['none', 'l2', 'l1']
kwargs = [dict(), dict(alpha=0.005), dict(alpha=0.001)]
labels = ['Linear Regression', 'Ridge Regression', 'Lasso Regression']
for i in range(3):
clf = LinearRegression(regularization=regularization[i],
fit_intercept=True, kwds=kwargs[i])
clf.fit(X, mu_sample, dmu)
w = clf.coef_[1:]
fit = clf.predict(gaussian_basis(z[:, None], centers, widths))
# plot fit
ax = fig.add_subplot(231 + i)
ax.xaxis.set_major_formatter(plt.NullFormatter())
# plot curves for regularized fits
if i == 0:
ax.set_ylabel('$\mu$')
else:
ax.yaxis.set_major_formatter(plt.NullFormatter())
curves = 37 + w * gaussian_basis(z[:, np.newaxis], centers, widths)
curves = curves[:, abs(w) > 0.01]
ax.plot(z, curves,
c='gray', lw=1, alpha=0.5)
ax.plot(z, fit, '-k')
ax.plot(z, mu, '--', c='gray')
ax.errorbar(z_sample, mu_sample, dmu, fmt='.k', ecolor='gray', lw=1, ms=4)
ax.set_xlim(0.001, 1.8)
ax.set_ylim(36, 52)
ax.text(0.05, 0.93, labels[i],
ha='left', va='top',
bbox=dict(boxstyle='round', ec='k', fc='w'),
transform=ax.transAxes)
# plot weights
ax = plt.subplot(234 + i)
ax.xaxis.set_major_locator(plt.MultipleLocator(0.5))
ax.set_xlabel('$z$')
if i == 0:
ax.set_ylabel(r'$\theta$')
w *= 1E-12
ax.text(0, 1.01, r'$\rm \times 10^{12}$',
transform=ax.transAxes)
ax.scatter(centers, w, s=9, lw=0, c='k')
ax.set_xlim(-0.05, 1.8)
if i == 1:
ax.set_ylim(-2, 4)
elif i == 2:
ax.set_ylim(-0.5, 2)
ax.text(0.05, 0.93, labels[i],
ha='left', va='top',
bbox=dict(boxstyle='round', ec='k', fc='w'),
transform=ax.transAxes)
plt.show()
| bsd-2-clause |
nesterione/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too strongly correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, then because the
number of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model,
is chosen by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
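# rescale so that each variable has unit variance: dividing the covariance by
# the outer product of the standard deviations turns it into a correlation
# matrix, and multiplying the precision by the same factors keeps
# prec equal to inv(cov)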
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
mzie/RATRACTION | OneStopTrack.py | 1 | 41159 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 27 00:09:18 2016
@author: Matt
"""
# import necessary packages and modules
from PyQt4 import Qt
from PyQt4 import (QtGui, QtCore)
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from time import sleep
from time import strftime
import time
import datetime
import sys
import cv2
from collections import (deque, OrderedDict)
import vid_tracking_methods
import Arduino_Wizard_v2
import Camera_Wizard_v2
import setup_arena_picture
class EmittingStream(Qt.QObject):
textWritten = Qt.pyqtSignal(str)
def write(self, text):
self.textWritten.emit(str(text))
class ButtonLineEdit(Qt.QLineEdit):
buttonClicked = Qt.pyqtSignal(bool)
def __init__(self, parent=None):
super(ButtonLineEdit, self).__init__(parent)
self.button = Qt.QToolButton(self)
self.button.setIcon(Qt.QIcon('open_file_icon.png'))
self.button.setStyleSheet('border: 0px; padding: 0px;')
self.button.setCursor(QtCore.Qt.ArrowCursor)
self.button.clicked.connect(self.buttonClicked.emit)
frameWidth = self.style().pixelMetric(QtGui.QStyle.PM_DefaultFrameWidth)
buttonSize = self.button.sizeHint()
self.setStyleSheet('QLineEdit {padding-right: %dpx; }' % (buttonSize.width() + frameWidth + 1))
self.setMinimumSize(max(self.minimumSizeHint().width(), buttonSize.width() + frameWidth*2 + 2),
max(self.minimumSizeHint().height(), buttonSize.height() + frameWidth*2 + 2))
def resizeEvent(self, event):
buttonSize = self.button.sizeHint()
frameWidth = self.style().pixelMetric(QtGui.QStyle.PM_DefaultFrameWidth)
self.button.move(self.rect().right() - frameWidth - buttonSize.width(),
(self.rect().bottom() - buttonSize.height() + 1)/2)
super(ButtonLineEdit, self).resizeEvent(event)
class VideoTracking(Qt.QObject):
"""
Object managing running the video tracking code
"""
def __init__(self):
super(VideoTracking, self).__init__()
global _isRunning
_isRunning = True
def Run(self):
global _isRunning, vidTrack_setup_parameters, camera, video_name, recording, record_name
if not _isRunning:
_isRunning = True
date = strftime("%Y-%m-%d")
start_time = strftime("%H:%M:%S")
try:
if vidTrack_setup_parameters == None:
try:
from Camera_Wizard_v2 import vidTrack_setup_parameters
except:
pass
if camera:
if vidTrack_setup_parameters['video_tracking_algorithm'] == "MOG":
if not recording:
live_MOG_trck = vid_tracking_methods.live_mog_tracking(vidTrack_setup_parameters)
return live_MOG_trck
elif recording and vidTrack_setup_parameters != None:
recorded_live_MOG_trck = vid_tracking_methods.live_mog_tracking(vidTrack_setup_parameters, recording, record_name)
return recorded_live_MOG_trck
elif vidTrack_setup_parameters['video_tracking_algorithm'] == "Frame Differencing":
if not recording:
live_FD_trck = vid_tracking_methods.live_fd_tracking(vidTrack_setup_parameters)
return live_FD_trck
elif recording and vidTrack_setup_parameters != None:
recorded_live_FD_trck = vid_tracking_methods.live_fd_tracking(vidTrack_setup_parameters, recording, record_name)
return recorded_live_FD_trck
elif vidTrack_setup_parameters['video_tracking_algorithm'] == "None":
if not recording:
live_cam_feed = vid_tracking_methods.live_camera_feed()
return live_cam_feed
elif recording:
recorded_live_cam_feed = vid_tracking_methods.live_camera_feed(recording, record_name)
return recorded_live_cam_feed
elif video_name:
if vidTrack_setup_parameters['video_tracking_algorithm'] == "MOG":
vid_MOG_trck = vid_tracking_methods.vid_mog_tracking(video_name, vidTrack_setup_parameters)
return vid_MOG_trck
elif vidTrack_setup_parameters['video_tracking_algorithm'] == "Frame Differencing":
vid_FD_trck = vid_tracking_methods.vid_fd_tracking(video_name, vidTrack_setup_parameters)
return vid_FD_trck
elif vidTrack_setup_parameters['video_tracking_algorithm'] == "None":
recorded_vid_feed = vid_tracking_methods.video_feed(video_name, vidTrack_setup_parameters)
return recorded_vid_feed
else:
print("video tracking setup is either missing or contains an error/s")
except:
print("video tracking setup is either missing or contains an error/s")
def stop(self):
global _isRunning
_isRunning = False
class Arduino(Qt.QObject):
"""
Object managing running the Arduino code
"""
def __init__(self):
super(Arduino, self).__init__()
global _isRunning2
_isRunning2 = True
self.actButton1_on = False
self.actButton2_on = False
self.actButton3_on = False
self.actButton4_on = False
self.actButton5_on = False
self.actButton6_on = False
self.actButton7_on = False
self.actButton8_on = False
def Run(self):
global ard_results, _isRunning2, ard_setup_parameters, a, mod_pt, run_tme_
ard_results = {}
if not _isRunning2:
_isRunning2 = True
try:
a
except:
self.stop()
return None
try:
if ard_setup_parameters == None:
try:
from Arduino_Wizard_v2 import ard_setup_parameters
except:
pass
pins = ard_setup_parameters['pins']
naming_list = ard_setup_parameters['naming_list']
setup_list = ard_setup_parameters['setup_list']
loop_list = ard_setup_parameters['loop_list']
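            # The DAAC wizard stores the user's configuration as plain Python
            # source strings: 'naming_list' declares variables, 'setup_list'
            # runs once before the loop (typically pin setup on the nanpy
            # ArduinoApi instance 'a'), and 'loop_list' is re-executed on
            # every pass of the while loop below via exec().  Each pass is
            # timestamped in ard_results['ard_loop_time'], and all OUTPUT
            # pins are driven LOW once the loop stops.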
for key in pins.keys():
ard_results[pins[key][0]] = []
ard_results['ard_loop_time'] = []
for name in naming_list:
exec(name)
for line in setup_list:
exec(line)
start = float(time.time())
while _isRunning2:
millis = float(time.time())
current_loop_time = millis - start
ard_results['ard_loop_time'].append(round(current_loop_time, 2))
for line in loop_list:
try:
exec(line)
except:
continue
for key in pins.keys():
if pins[key][1] == "OUTPUT":
exec("a.digitalWrite(%s, a.LOW)" %(pins[key][0]))
except:
self.stop()
print("DAAC setup is either missing or contains an error/s")
def stop(self):
global _isRunning2
_isRunning2 = False
def actButton1(self, checked):
if checked:
self.actButton1_on = True
elif not checked:
self.actButton1_on = False
def actButton2(self, checked):
if checked:
self.actButton2_on = True
elif not checked:
self.actButton2_on = False
    def actButton3(self, checked):
        if checked:
            self.actButton3_on = True
        elif not checked:
            self.actButton3_on = False
    def actButton4(self, checked):
        if checked:
            self.actButton4_on = True
        elif not checked:
            self.actButton4_on = False
    def actButton5(self, checked):
        if checked:
            self.actButton5_on = True
        elif not checked:
            self.actButton5_on = False
    def actButton6(self, checked):
        if checked:
            self.actButton6_on = True
        elif not checked:
            self.actButton6_on = False
    def actButton7(self, checked):
        if checked:
            self.actButton7_on = True
        elif not checked:
            self.actButton7_on = False
    def actButton8(self, checked):
        if checked:
            self.actButton8_on = True
        elif not checked:
            self.actButton8_on = False
class Window(Qt.QWidget):
def __init__(self):
super(Window, self).__init__()
self.startButton = Qt.QPushButton()
self.startButton.setFixedSize(100,100)
self.startButton.setIcon(Qt.QIcon('Green_Start.png'))
self.startButton.setIconSize(Qt.QSize(75,75))
self.stopButton = Qt.QPushButton()
self.stopButton.setFixedSize(100,100)
self.stopButton.setIcon(Qt.QIcon('Red_Stop.png'))
self.stopButton.setIconSize(Qt.QSize(75,75))
self.vidThread = Qt.QThread()
self.ardThread = Qt.QThread()
self.vidThread.start()
self.ardThread.start()
self.vidTracking = VideoTracking()
self.vidTracking.moveToThread(self.vidThread)
self.arduino = Arduino()
self.arduino.moveToThread(self.ardThread)
self.stopButton.clicked.connect(lambda: self.arduino.stop())
self.stopButton.clicked.connect(lambda: self.vidTracking.stop())
self.startButton.clicked.connect(self.arduino.Run)
self.startButton.clicked.connect(self.start_pressed)
self.table = Qt.QTableWidget()
self.table_rowCount = 0
self.table.setRowCount(self.table_rowCount)
self.table.setColumnCount(6)
self.table.setHorizontalHeaderLabels(['Trial #', 'Date', 'Start Time', 'End Time', 'Duration (sec)', 'Comments'])
#self.table.horizontalHeader().setResizeMode(Qt.QHeaderView.Stretch)
self.table.horizontalHeader().setStretchLastSection(True)
self.table.cellChanged.connect(self.cell_was_clicked)
boldFont = Qt.QFont()
boldFont.setBold(True)
label1 = Qt.QLabel("Python Console Output")
label1.setFont(boldFont)
self.txtEdt1 = Qt.QTextEdit()
self.txtEdt1.setReadOnly(True)
# Install the custom output stream
sys.stdout = EmittingStream(textWritten = self.normalOutputWritten)
self.actButton1 = Qt.QPushButton(self.tr("&actButton1"))
self.actButton1.setCheckable(True)
self.actButton1.toggled.connect(lambda: self.arduino.actButton1(self.actButton1.isChecked()))
self.actButton2 = Qt.QPushButton(self.tr("&actButton2"))
self.actButton2.setCheckable(True)
self.actButton2.toggled.connect(lambda: self.arduino.actButton2(self.actButton2.isChecked()))
self.actButton3 = Qt.QPushButton(self.tr("&actButton3"))
self.actButton3.setCheckable(True)
self.actButton3.toggled.connect(lambda: self.arduino.actButton3(self.actButton3.isChecked()))
self.actButton4 = Qt.QPushButton(self.tr("&actButton4"))
self.actButton4.setCheckable(True)
self.actButton4.toggled.connect(lambda: self.arduino.actButton4(self.actButton4.isChecked()))
self.actButton5 = Qt.QPushButton(self.tr("&actButton5"))
self.actButton5.setCheckable(True)
self.actButton5.toggled.connect(lambda: self.arduino.actButton5(self.actButton5.isChecked()))
self.actButton6 = Qt.QPushButton(self.tr("&actButton6"))
self.actButton6.setCheckable(True)
self.actButton6.toggled.connect(lambda: self.arduino.actButton6(self.actButton6.isChecked()))
self.actButton7 = Qt.QPushButton(self.tr("&actButton7"))
self.actButton7.setCheckable(True)
self.actButton7.toggled.connect(lambda: self.arduino.actButton7(self.actButton7.isChecked()))
self.actButton8 = Qt.QPushButton(self.tr("&actButton8"))
self.actButton8.setCheckable(True)
self.actButton8.toggled.connect(lambda: self.arduino.actButton8(self.actButton8.isChecked()))
self.frame4 = Qt.QFrame()
self.frame4.setFrameStyle(1)
self.frame4.setFixedSize(350,370)
self.arena_setup_new_edit = Qt.QPushButton("New/Edit")
self.arena_setup_new_edit.clicked.connect(self.new_edit_arena_setup)
self.arena_setup_load = Qt.QPushButton("Load")
self.arena_setup_load.clicked.connect(self.load_arena_setup)
self.arena_setup_save = Qt.QPushButton("Save")
self.arena_setup_save.clicked.connect(self.save_arena_setup)
layout6 = Qt.QHBoxLayout()
layout6.addWidget(self.arena_setup_new_edit)
layout6.addWidget(self.arena_setup_load)
layout6.addWidget(self.arena_setup_save)
self.main_frame = Qt.QWidget()
self.main_frame.setFixedSize(325,325)
self.fig = Figure((5.0, 4.0), dpi=100)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
self.fig.canvas.mpl_connect("motion_notify_event", self.mouse_movement)
self.fig_toolbar = NavigationToolbar(self.canvas, self.main_frame)
self.fig_toolbar.hide()
self.homeBtn = Qt.QPushButton()
self.homeBtn.setFixedSize(25,25)
self.homeBtn.setIcon(Qt.QIcon('home.png'))
self.homeBtn.setIconSize(Qt.QSize(20,20))
self.homeBtn.clicked.connect(self.home)
self.panBtn = Qt.QPushButton()
self.panBtn.setCheckable(True)
self.panBtn.setFixedSize(25,25)
self.panBtn.setIcon(Qt.QIcon('move.png'))
self.panBtn.setIconSize(Qt.QSize(20,20))
self.panBtn.clicked.connect(self.pan)
self.zoomBtn = Qt.QPushButton()
self.zoomBtn.setCheckable(True)
self.zoomBtn.setFixedSize(25,25)
self.zoomBtn.setIcon(Qt.QIcon('zoom_to_rect.png'))
self.zoomBtn.setIconSize(Qt.QSize(20,20))
self.zoomBtn.clicked.connect(self.zoom_to_rect)
self.lblsBtn = Qt.QPushButton()
self.lblsBtn.setCheckable(True)
self.lblsBtn.setFixedSize(25,25)
self.lblsBtn.setIcon(Qt.QIcon('label_icon.png'))
self.lblsBtn.setIconSize(Qt.QSize(20,20))
self.lblsBtn.clicked.connect(self.show_hide_labels)
self.drawBtn = Qt.QPushButton()
self.drawBtn.setFixedSize(25,25)
self.drawBtn.setIcon(Qt.QIcon('refresh_icon.jpeg'))
self.drawBtn.setIconSize(Qt.QSize(20,20))
self.drawBtn.clicked.connect(self.redraw_arena_setup)
self.coords_label = Qt.QLabel()
self.coords_label.setAlignment(QtCore.Qt.AlignCenter)
self.fig_statusbar = Qt.QStatusBar()
self.fig_statusbar.setSizeGripEnabled(False)
self.fig_statusbar.addWidget(self.homeBtn)
self.fig_statusbar.addWidget(self.panBtn)
self.fig_statusbar.addWidget(self.zoomBtn)
self.fig_statusbar.addWidget(self.lblsBtn)
self.fig_statusbar.addWidget(self.drawBtn)
self.fig_statusbar.addWidget(self.coords_label, 1)
frame_layout4 = Qt.QVBoxLayout()
frame_layout4.addLayout(layout6)
frame_layout4.addWidget(self.main_frame)
frame_layout4.addWidget(self.fig_statusbar)
self.frame4.setLayout(frame_layout4)
self.radBtn1 = Qt.QRadioButton(self.tr("Connect to Raspberry Pi Camera"))
self.radBtn1.setFont(boldFont)
self.radBtn1.setChecked(False)
self.radBtn1.toggled.connect(self.connect_to_camera)
self.radBtn4 = Qt.QRadioButton(self.tr("Video Record Trial"))
self.radBtn4.setChecked(False)
self.radBtn4.setDisabled(True)
self.radBtn4.toggled.connect(self.video_record_trial)
self.lneEdt1 = Qt.QLineEdit()
self.lneEdt1.setDisabled(True)
self.lneEdt1.setText("example_video_name.mp4")
self.lneEdt1.textChanged.connect(self.video_record_trial)
self.new_edit1 = Qt.QPushButton("New/Edit")
self.new_edit1.clicked.connect(self.new_edit_video_tracking_method)
self.new_edit1.setDisabled(True)
self.load1 = Qt.QPushButton("Load")
self.load1.clicked.connect(self.load_video_tracking_method)
self.load1.setDisabled(True)
self.save1 = Qt.QPushButton("Save")
self.save1.clicked.connect(self.save_video_tracking_method)
self.save1.setDisabled(True)
butLayout1 = Qt.QHBoxLayout()
butLayout1.addWidget(self.new_edit1)
butLayout1.addWidget(self.load1)
butLayout1.addWidget(self.save1)
self.frame1 = Qt.QFrame()
self.frame1.setFrameStyle(1)
self.frame1.setFixedSize(350,140)
frame_layout1 = Qt.QVBoxLayout()
frame_layout1.addWidget(self.radBtn1)
frame_layout1.addLayout(butLayout1)
frame_layout1.addWidget(self.radBtn4)
frame_layout1.addWidget(self.lneEdt1)
self.frame1.setLayout(frame_layout1)
self.radBtn2 = Qt.QRadioButton(self.tr("Load Video Recording"))
self.radBtn2.setFont(boldFont)
self.radBtn2.setChecked(False)
self.radBtn2.toggled.connect(self.load_video_recording)
self.btnLneEdt1 = ButtonLineEdit()
self.btnLneEdt1.setReadOnly(True)
self.btnLneEdt1.setDisabled(True)
self.btnLneEdt1.buttonClicked.connect(self.find_video_recording)
self.vid_len_label = Qt.QLabel("Loaded video length / Trial Runtime (seconds):")
self.vid_len_label.setDisabled(True)
self.vid_len_spnBox = Qt.QSpinBox()
self.vid_len_spnBox.setMaximum(86400)
self.vid_len_spnBox.setDisabled(True)
vidLenLayout = Qt.QFormLayout()
vidLenLayout.addRow(self.vid_len_label, self.vid_len_spnBox)
vidLenLayout.setLabelAlignment(QtCore.Qt.AlignRight)
self.new_edit2 = Qt.QPushButton("New/Edit")
self.new_edit2.clicked.connect(self.new_edit_video_tracking_method)
self.new_edit2.setDisabled(True)
self.load2 = Qt.QPushButton("Load")
self.load2.clicked.connect(self.load_video_tracking_method)
self.load2.setDisabled(True)
self.save2 = Qt.QPushButton("Save")
self.save2.clicked.connect(self.save_video_tracking_method)
self.save2.setDisabled(True)
butLayout2 = Qt.QHBoxLayout()
butLayout2.addWidget(self.new_edit2)
butLayout2.addWidget(self.load2)
butLayout2.addWidget(self.save2)
self.frame2 = Qt.QFrame()
self.frame2.setFrameStyle(1)
self.frame2.setFixedSize(350,145)
frame_layout2 = Qt.QVBoxLayout()
frame_layout2.addWidget(self.radBtn2)
frame_layout2.addWidget(self.btnLneEdt1)
frame_layout2.addLayout(vidLenLayout)
frame_layout2.addLayout(butLayout2)
self.frame2.setLayout(frame_layout2)
self.radBtn3 = Qt.QRadioButton(self.tr("Connect to Arduino"))
self.radBtn3.setFont(boldFont)
self.radBtn3.setChecked(False)
self.radBtn3.toggled.connect(self.connect_to_arduino)
self.new_edit3 = Qt.QPushButton("New/Edit")
self.new_edit3.clicked.connect(self.new_edit_ard_method)
self.new_edit3.setDisabled(True)
self.load3 = Qt.QPushButton("Load")
self.load3.clicked.connect(self.load_ard_method)
self.load3.setDisabled(True)
self.save3 = Qt.QPushButton("Save")
self.save3.clicked.connect(self.save_ard_method)
self.save3.setDisabled(True)
butLayout3 = Qt.QHBoxLayout()
butLayout3.addWidget(self.new_edit3)
butLayout3.addWidget(self.load3)
butLayout3.addWidget(self.save3)
self.frame3 = Qt.QFrame()
self.frame3.setFrameStyle(1)
self.frame3.setFixedSize(350,80)
frame_layout3 = Qt.QVBoxLayout()
frame_layout3.addWidget(self.radBtn3)
frame_layout3.addLayout(butLayout3)
self.frame3.setLayout(frame_layout3)
self.group = Qt.QButtonGroup()
self.group.addButton(self.radBtn1)
self.group.addButton(self.radBtn2)
self.group.setExclusive(False)
self.lcd = QtGui.QLCDNumber()
self.lcd.setFixedSize(140,40)
self.frame6 = Qt.QFrame()
self.frame6.setFrameStyle(1)
frame_layout6 = Qt.QHBoxLayout()
frame_layout6.addWidget(self.lcd)
self.frame6.setLayout(frame_layout6)
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self.timer_time)
global s, m, h
s = 0
m = 0
h = 0
layout = Qt.QGridLayout()
layout.addWidget(self.actButton1, 0, 0)
layout.addWidget(self.actButton2, 1, 0)
layout.addWidget(self.actButton3, 0, 1)
layout.addWidget(self.actButton4, 1, 1)
layout.addWidget(self.actButton5, 0, 2)
layout.addWidget(self.actButton6, 1, 2)
layout.addWidget(self.actButton7, 0, 3)
layout.addWidget(self.actButton8, 1, 3)
layout5 = Qt.QHBoxLayout()
layout5.addWidget(self.startButton)
layout5.addWidget(self.stopButton)
self.frame5 = Qt.QFrame()
self.frame5.setFrameStyle(1)
frame_layout5 = Qt.QHBoxLayout()
frame_layout5.addLayout(layout5)
frame_layout5.addLayout(layout)
self.frame5.setLayout(frame_layout5)
layout1 = Qt.QHBoxLayout()
layout1.addWidget(self.frame5)
layout1.addWidget(self.frame6)
layout2 = Qt.QVBoxLayout()
layout2.addLayout(layout1)
layout2.addWidget(self.table)
layout2.addWidget(label1)
layout2.addWidget(self.txtEdt1)
layout5 = Qt.QVBoxLayout()
layout5.addWidget(self.canvas)
layout5.addWidget(self.fig_statusbar)
self.main_frame.setLayout(layout5)
layout3 = Qt.QVBoxLayout()
layout3.addWidget(self.frame4)
layout3.addWidget(self.frame1)
layout3.addWidget(self.frame2)
layout3.addWidget(self.frame3)
layout3.addStretch(True)
layout4 = Qt.QHBoxLayout()
layout4.addLayout(layout3)
layout4.addLayout(layout2)
self.setLayout(layout4)
global global_results, vidTrack_setup_parameters, ard_setup_parameters, arena_setup_parameters, camera, video_name, recording, record_name
global_results = {}
vidTrack_setup_parameters = None
ard_setup_parameters = None
arena_setup_parameters = None
camera = None
video_name = None
recording = False
record_name = None
def __del__(self):
# Restore sys.stdout
sys.stdout = sys.__stdout__
def normalOutputWritten(self, text):
self.txtEdt1.append(text)
def cell_was_clicked(self):
global global_results
try:
current_row = self.table.currentRow()
current_column = self.table.currentColumn()
current_item_text = self.table.currentItem().text()
if current_column == 5:
global_results[str(current_row+1)]["trial_info"].update({"Comments":current_item_text})
except:
pass
def new_edit_arena_setup(self):
global arena_setup_parameters
if arena_setup_parameters == None:
try:
from setup_arena_picture import arena_setup_parameters
except:
pass
self.new_edit_arena = setup_arena_picture.Window(cam=camera, vid_name=video_name, arena_sp=arena_setup_parameters)
self.new_edit_arena.show()
arena_setup_parameters = None
def load_arena_setup(self):
try:
global arena_setup_parameters
name = Qt.QFileDialog.getOpenFileName(self, 'Load Arena Setup')
with open(name, 'r') as f:
arena_setup_parameters = eval(f.read())
print("loaded arena setup: %s" %(arena_setup_parameters))
except:
pass
def save_arena_setup(self):
global arena_setup_parameters
try:
try:
from setup_arena_picture import arena_setup_parameters
except:
pass
name = Qt.QFileDialog.getSaveFileName(self, 'Save Arena Setup')
with open(name, 'w') as text_file:
text_file.write(str(arena_setup_parameters))
print("arena setup saved %s" %(arena_setup_parameters))
except:
pass
def home(self):
self.fig_toolbar.home()
def pan(self):
self.fig_toolbar.pan()
def zoom_to_rect(self):
self.fig_toolbar.zoom()
def show_hide_labels(self):
global arena_setup_parameters
if self.lblsBtn.isChecked():
# add labels to the grid squares
self.grid_labels = []
for j in range(self.num_height_divs):
y = self.height_div/2+j*self.height_div
for i in range(self.num_width_divs):
x = self.width_div/2.+float(i)*self.width_div
if arena_setup_parameters ['arena_pic_name'] != "no picture":
grid_label = self.axes.text(x,y, ("(%d,%d)" %(i,j)), fontsize=6, color='w', ha='center', va='center')
else:
grid_label = self.axes.text(x,y, ("(%d,%d)" %(i,j)), fontsize=6, color='b', ha='center', va='center')
self.grid_labels.append(grid_label)
self.canvas.draw()
elif not self.lblsBtn.isChecked():
for label in self.grid_labels:
label.remove()
self.canvas.draw()
def redraw_arena_setup(self):
global arena_setup_parameters
if arena_setup_parameters == None:
from setup_arena_picture import arena_setup_parameters
self.lblsBtn.setChecked(False)
self.arena_pic_name = arena_setup_parameters['arena_pic_name']
try:
self.arena_pic = mpimg.imread(self.arena_pic_name)
except:
pass
self.arena_width = arena_setup_parameters['arena_width']
self.arena_height = arena_setup_parameters['arena_height']
self.width_div = arena_setup_parameters['width_div']
self.height_div = arena_setup_parameters['height_div']
self.num_width_divs = int(round(self.arena_width/self.width_div,0))
self.num_height_divs = int(round(self.arena_height/self.height_div,0))
self.fig.clear()
self.axes = self.fig.add_subplot(111)
try:
self.axes.imshow(self.arena_pic, origin="lower", extent=(0,self.arena_width,0,self.arena_height))
except:
npArray = np.array([[[0, 0, 0, 0]]], dtype="uint8")
self.axes.imshow(npArray, origin="lower", extent=(0,self.arena_width,0,self.arena_height))
self.axes.set_xticks(np.arange(0, (self.arena_width+self.width_div),self.width_div))
self.axes.set_yticks(np.arange(0, (self.arena_height+self.height_div),self.height_div))
self.axes.tick_params(axis="both", which="major", labelsize="6")
self.axes.grid(which="major", axis="both", linestyle='-', color='r')
self.axes.xaxis.tick_top()
self.axes.invert_yaxis()
self.axes.set_xlabel("Arena Width (cm)", fontsize=8)
self.axes.set_ylabel("Arena Height (cm)", fontsize=8)
self.axes.tick_params(axis="both", which="major", labelsize=6)
self.canvas.draw()
def mouse_movement(self, event):
try:
self.coords_label.setText("x=%.3f, y=%.3f" %(event.xdata, event.ydata))
except:
            if not self.coords_label.text() == "":
self.coords_label.setText("")
else:
pass
def connect_to_camera(self):
global camera, vidTrack_setup_parameters
if self.radBtn1.isChecked():
camera = True
self.new_edit1.setDisabled(False)
self.load1.setDisabled(False)
self.save1.setDisabled(False)
self.radBtn4.setDisabled(False)
self.radBtn2.setChecked(False)
#self.load_video_recording()
elif not self.radBtn1.isChecked():
camera = None
vidTrack_setup_parameters = None
self.new_edit1.setDisabled(True)
self.load1.setDisabled(True)
self.save1.setDisabled(True)
self.radBtn4.setDisabled(True)
self.lneEdt1.setDisabled(True)
def new_edit_video_tracking_method(self):
global camera, video_name, vidTrack_setup_parameters
if vidTrack_setup_parameters == None:
try:
from Camera_Wizard_v2 import vidTrack_setup_parameters
except:
pass
self.cwiz = Camera_Wizard_v2.CameraWizard(cam = camera, vid_name = video_name, vidTrack_sp = vidTrack_setup_parameters)
self.cwiz.show()
vidTrack_setup_parameters = None
def load_video_tracking_method(self):
try:
global vidTrack_setup_parameters
name = Qt.QFileDialog.getOpenFileName(self, 'Load Video Tracking Setup')
with open(name, 'r') as f:
vidTrack_setup_parameters = eval(f.read())
print("loaded video tracking setup: %s" %(vidTrack_setup_parameters))
except:
pass
def save_video_tracking_method(self):
global vidTrack_setup_parameters
try:
if vidTrack_setup_parameters == None:
try:
from Camera_Wizard_v2 import vidTrack_setup_parameters
except:
pass
name = Qt.QFileDialog.getSaveFileName(self, 'Save Video Tracking Setup')
with open(name, 'w') as text_file:
text_file.write(str(vidTrack_setup_parameters))
print("video tracking setup saved %s" %(vidTrack_setup_parameters))
except:
pass
def video_record_trial(self):
global recording, record_name
if self.radBtn4.isChecked():
self.lneEdt1.setDisabled(False)
recording = True
record_name = self.lneEdt1.text()
elif not self.radBtn4.isChecked():
self.lneEdt1.setDisabled(True)
recording = False
record_name = None
def load_video_recording(self):
global video_name, vidTrack_setup_parameters
if self.radBtn2.isChecked():
self.btnLneEdt1.setDisabled(False)
self.radBtn4.setChecked(False)
self.radBtn4.setDisabled(False)
self.radBtn1.setChecked(False)
#self.connect_to_camera()
elif not self.radBtn2.isChecked():
video_name = None
vidTrack_setup_parameters = None
self.btnLneEdt1.clear()
self.btnLneEdt1.setDisabled(True)
self.vid_len_label.setDisabled(True)
self.vid_len_spnBox.clear()
self.vid_len_spnBox.setDisabled(True)
self.new_edit2.setDisabled(True)
self.load2.setDisabled(True)
self.save2.setDisabled(True)
def find_video_recording(self):
try:
global video_name
video_name = Qt.QFileDialog.getOpenFileName(self, 'Find Video Recording')
self.btnLneEdt1.setText(video_name)
self.vid_len_label.setDisabled(False)
self.vid_len_spnBox.setDisabled(False)
self.new_edit2.setDisabled(False)
self.load2.setDisabled(False)
self.save2.setDisabled(False)
print("video name: %s" %(video_name))
except:
pass
def connect_to_arduino(self):
global a, ard_setup_parameters
if self.radBtn3.isChecked():
try:
from nanpy import (SerialManager, ArduinoApi)
connection = SerialManager()
a = ArduinoApi(connection=connection)
self.new_edit3.setDisabled(False)
self.load3.setDisabled(False)
self.save3.setDisabled(False)
except:
print("Failed to connect to Arduino")
self.radBtn3.setChecked(False) # change back to False
elif not self.radBtn3.isChecked():
try:
del a
except:
pass
ard_setup_parameters = None
self.new_edit3.setDisabled(True)
self.load3.setDisabled(True)
self.save3.setDisabled(True)
def new_edit_ard_method(self):
global ard_setup_parameters
if ard_setup_parameters == None:
try:
from Arduino_Wizard_v2 import ard_setup_parameters
except:
pass
self.awiz = Arduino_Wizard_v2.ArduinoWizard(arduino_sp=ard_setup_parameters)
self.awiz.show()
ard_setup_parameters = None
def load_ard_method(self):
global ard_setup_parameters
try:
name = Qt.QFileDialog.getOpenFileName(self, 'Load DAAC setup')
with open(name, 'r') as f:
ard_setup_parameters = eval(f.read())
print("loaded DAAC setup: %s" %(ard_setup_parameters))
except:
pass
def save_ard_method(self):
global ard_setup_parameters
try:
if ard_setup_parameters == None:
try:
from Arduino_Wizard_v2 import ard_setup_parameters
except:
pass
name = Qt.QFileDialog.getSaveFileName(self, 'Save DAAC setup')
with open(name, 'w') as text_file:
text_file.write(str(ard_setup_parameters))
print("DAAC setup saved %s" %(ard_setup_parameters))
except:
pass
def timer_start(self):
global s,m,h
self.timer.start(1000)
def timer_time(self):
global s,m,h
if s < 59:
s += 1
else:
if m < 59:
s = 0
m += 1
elif m == 59 and h < 24:
h += 1
m = 0
s = 0
else:
self.timer.stop()
time = "{0}:{1}:{2}".format(h,m,s)
self.lcd.setDigitCount(len(time))
self.lcd.display(time)
self.activateWindow()
def timer_reset(self):
global s,m,h
self.timer.stop()
s = 0
m = 0
h = 0
time = "{0}:{1}:{2}".format(h,m,s)
self.lcd.setDigitCount(len(time))
self.lcd.display(time)
def start_pressed(self):
global global_results, ard_results, _isRunning2, vidTrack_setup_parameters, ard_setup_parameters
date = strftime("%Y-%m-%d")
start_time = strftime("%H:%M:%S")
self.timer_reset()
self.timer_start()
if (self.radBtn1.isChecked() or self.radBtn2.isChecked()):
trck = self.vidTracking.Run()
while _isRunning2:
Qt.qApp.processEvents()
self.timer.stop()
end_time = strftime("%H:%M:%S")
self.actButton1.setChecked(False)
self.actButton2.setChecked(False)
self.actButton3.setChecked(False)
self.actButton4.setChecked(False)
self.actButton5.setChecked(False)
self.actButton6.setChecked(False)
self.actButton7.setChecked(False)
self.actButton8.setChecked(False)
vidTrack_results = {}
trial_info = {}
try:
if (vidTrack_setup_parameters['video_tracking_algorithm'] == "Frame Differencing") or (vidTrack_setup_parameters['video_tracking_algorithm'] == "MOG"):
if self.radBtn1.isChecked() or (self.radBtn2.isChecked() and (int(self.vid_len_spnBox.value() == 0))):
vidTrack_results['run_time'] = trck[2]
trial_info['Trial_Duration'] = trck[2][-1]
vidTrack_results['vid_pts_time'] = trck[0]
elif self.radBtn2.isChecked() and (int(self.vid_len_spnBox.value()) != 0):
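                    # The loaded video is processed faster or slower than real
                    # time, so rescale the measured timestamps by
                    # (measured runtime / user-supplied true video length) so
                    # that they span the actual video duration.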
video_time_calibration_factor = trck[2][-1]/int(self.vid_len_spnBox.value())
vidTrack_results['run_time'] = []
for time in trck[2]:
mod_time = round(time/video_time_calibration_factor, 2)
vidTrack_results['run_time'].append(mod_time)
trial_info['Trial_Duration'] = vidTrack_results['run_time'][-1]
vidTrack_results['vid_pts_time'] = []
for time in trck[0]:
mod_time = round(time/video_time_calibration_factor, 2)
vidTrack_results["vid_pts_time"].append(mod_time)
vidTrack_results['position'] = trck[1]
elif vidTrack_setup_parameters['video_tracking_algorithm'] == "None":
if self.radBtn1.isChecked() or (self.radBtn2.isChecked() and (int(self.vid_len_spnBox.value()) == 0)):
vidTrack_results['run_time'] = trck
trial_info['Trial_Duration'] = trck[-1]
elif self.radBtn2.isChecked() and (int(self.vid_len_spnBox.value()) != 0):
video_time_calibration_factor = (trck[-1]/int(self.vid_len_spnBox.value()))
vidTrack_results['vid_pts_time'] = []
for time in trck[0]:
mod_time = round(time/video_time_calibration_factor, 2)
vidTrack_results['vid_pts_time'].append(mod_time)
trial_info['Trial_Duration'] = vidTrack_results['run_time'][-1]
except:
pass
try:
try:
duration = str(trial_info['Trial_Duration'])
except:
duration = str(ard_results['ard_loop_time'][-1])
except:
duration = str(0)
trial_info["Date"] = date
trial_info["Start_Time"] = start_time
trial_info["End_Time"] = end_time
trial_info["Trial_Duration"] = duration
self.table_rowCount += 1
self.table.setRowCount(self.table_rowCount)
global_results[str(self.table_rowCount)] = {}
global_results[str(self.table_rowCount)]["trial_info"] = trial_info
try:
global_results[str(self.table_rowCount)]["results"] = vidTrack_results
global_results[str(self.table_rowCount)]["results"].update(ard_results)
try:
ard_results = {}
global_results[str(self.table_rowCount)]["results"].update(ard_results)
except:
pass
except:
pass
self.table.setItem(self.table_rowCount-1, 0, Qt.QTableWidgetItem(str(self.table_rowCount)))
self.table.setItem(self.table_rowCount-1, 1, Qt.QTableWidgetItem(date))
self.table.setItem(self.table_rowCount-1, 2, Qt.QTableWidgetItem(start_time))
self.table.setItem(self.table_rowCount-1, 3, Qt.QTableWidgetItem(end_time))
self.table.setItem(self.table_rowCount-1, 4, Qt.QTableWidgetItem(duration))
# main ============================================
def main():
app = Qt.QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| gpl-3.0 |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/feature_selection/tests/test_rfe.py | 1 | 10440 | """
Testing Recursive feature elimination
"""
import numpy as np
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy import sparse
from sklearn.datasets import load_iris, make_friedman1
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.metrics import get_scorer
from sklearn.metrics import make_scorer
from sklearn.metrics import zero_one_loss
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC, SVR
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
class MockClassifier(object):
"""
Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
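    # Worked example: n_features=11, n_features_to_select=3, step=2 gives
    #   formula1: 1 + (11 + 2 - 3 - 1) // 2      = 1 + 4 = 5
    #   formula2: 1 + ceil((11 - 3) / float(2))  = 1 + 4 = 5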
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
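    # e.g. n_features=11, step=2: 1 + ceil((11 - 1) / 2.) = 6 grid scores;
    #      n_features=10, step=2: 1 + ceil((10 - 1) / 2.) = 6 as well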
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
def test_rfe_cv_n_jobs():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
rfecv = RFECV(estimator=SVC(kernel='linear'))
rfecv.fit(X, y)
rfecv_ranking = rfecv.ranking_
rfecv_grid_scores = rfecv.grid_scores_
rfecv.set_params(n_jobs=2)
rfecv.fit(X, y)
assert_array_almost_equal(rfecv.ranking_, rfecv_ranking)
assert_array_almost_equal(rfecv.grid_scores_, rfecv_grid_scores)
| mit |
alexsavio/scikit-learn | examples/applications/plot_stock_market.py | 76 | 8522 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
try:
from matplotlib.finance import quotes_historical_yahoo_ochl
except ImportError:
# quotes_historical_yahoo_ochl was named quotes_historical_yahoo before matplotlib 1.4
from matplotlib.finance import quotes_historical_yahoo as quotes_historical_yahoo_ochl
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [quotes_historical_yahoo_ochl(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
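# (multiplying by d on both axes rescales the precision matrix to
#  precision[i, j] / sqrt(precision[i, i] * precision[j, j]), i.e. the partial
#  correlations up to sign; only entries with absolute value above 0.02 are
#  kept as edges)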
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
mhallsmoore/qstrader | examples/momentum_taa.py | 1 | 8160 | import operator
import os
import pandas as pd
import pytz
from qstrader.alpha_model.alpha_model import AlphaModel
from qstrader.alpha_model.fixed_signals import FixedSignalsAlphaModel
from qstrader.asset.equity import Equity
from qstrader.asset.universe.dynamic import DynamicUniverse
from qstrader.asset.universe.static import StaticUniverse
from qstrader.signals.momentum import MomentumSignal
from qstrader.signals.signals_collection import SignalsCollection
from qstrader.data.backtest_data_handler import BacktestDataHandler
from qstrader.data.daily_bar_csv import CSVDailyBarDataSource
from qstrader.statistics.tearsheet import TearsheetStatistics
from qstrader.trading.backtest import BacktestTradingSession
class TopNMomentumAlphaModel(AlphaModel):
def __init__(
self, signals, mom_lookback, mom_top_n, universe, data_handler
):
"""
Initialise the TopNMomentumAlphaModel
Parameters
----------
signals : `SignalsCollection`
The entity for interfacing with various pre-calculated
signals. In this instance we want to use 'momentum'.
mom_lookback : `integer`
The number of business days to calculate momentum
lookback over.
mom_top_n : `integer`
The number of assets to include in the portfolio,
            ranked by momentum in descending order.
universe : `Universe`
The collection of assets utilised for signal generation.
data_handler : `DataHandler`
The interface to the CSV data.
Returns
-------
None
"""
self.signals = signals
self.mom_lookback = mom_lookback
self.mom_top_n = mom_top_n
self.universe = universe
self.data_handler = data_handler
def _highest_momentum_asset(
self, dt
):
"""
Calculates the ordered list of highest performing momentum
assets restricted to the 'Top N', for a particular datetime.
Parameters
----------
dt : `pd.Timestamp`
The datetime for which the highest momentum assets
should be calculated.
Returns
-------
`list[str]`
Ordered list of highest performing momentum assets
restricted to the 'Top N'.
"""
assets = self.signals['momentum'].assets
# Calculate the holding-period return momenta for each asset,
# for the particular provided momentum lookback period
all_momenta = {
asset: self.signals['momentum'](
asset, self.mom_lookback
) for asset in assets
}
# Obtain a list of the top performing assets by momentum
# restricted by the provided number of desired assets to
# trade per month
return [
asset[0] for asset in sorted(
all_momenta.items(),
key=operator.itemgetter(1),
reverse=True
)
][:self.mom_top_n]
def _generate_signals(
self, dt, weights
):
"""
Calculate the highest performing momentum for each
asset then assign 1 / N of the signal weight to each
of these assets.
Parameters
----------
dt : `pd.Timestamp`
The datetime for which the signal weights
should be calculated.
weights : `dict{str: float}`
The current signal weights dictionary.
Returns
-------
`dict{str: float}`
The newly created signal weights dictionary.
"""
top_assets = self._highest_momentum_asset(dt)
for asset in top_assets:
weights[asset] = 1.0 / self.mom_top_n
return weights
def __call__(
self, dt
):
"""
Calculates the signal weights for the top N
momentum alpha model, assuming that there is
sufficient data to begin calculating momentum
on the desired assets.
Parameters
----------
dt : `pd.Timestamp`
The datetime for which the signal weights
should be calculated.
Returns
-------
`dict{str: float}`
The newly created signal weights dictionary.
"""
assets = self.universe.get_assets(dt)
weights = {asset: 0.0 for asset in assets}
# Only generate weights if the current time exceeds the
# momentum lookback period
if self.signals.warmup >= self.mom_lookback:
weights = self._generate_signals(dt, weights)
return weights
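# Illustrative usage sketch (comments only, not part of the original script):
# once wired up as in the __main__ block below, an instance of
# TopNMomentumAlphaModel can be called with a timestamp and returns a flat
# dict of signal weights, assigning 1/N to each of the top N momentum assets.
# The asset names and numbers here are hypothetical.
# >>> alpha_model(pd.Timestamp('2005-06-30 14:30:00', tz=pytz.UTC))
# {'EQ:XLB': 0.0, 'EQ:XLE': 0.333, 'EQ:XLK': 0.333, 'EQ:XLV': 0.333, ...}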
if __name__ == "__main__":
# Duration of the backtest
start_dt = pd.Timestamp('1998-12-22 14:30:00', tz=pytz.UTC)
burn_in_dt = pd.Timestamp('1999-12-22 14:30:00', tz=pytz.UTC)
end_dt = pd.Timestamp('2020-12-31 23:59:00', tz=pytz.UTC)
# Model parameters
mom_lookback = 126 # Six months worth of business days
mom_top_n = 3 # Number of assets to include at any one time
# Construct the symbols and assets necessary for the backtest
# This utilises the SPDR US sector ETFs, all beginning with XL
strategy_symbols = ['XL%s' % sector for sector in "BCEFIKPUVY"]
assets = ['EQ:%s' % symbol for symbol in strategy_symbols]
# As this is a dynamic universe of assets (XLC is added later)
# we need to tell QSTrader when XLC can be included. This is
# achieved using an asset dates dictionary
asset_dates = {asset: start_dt for asset in assets}
asset_dates['EQ:XLC'] = pd.Timestamp('2018-06-18 00:00:00', tz=pytz.UTC)
strategy_universe = DynamicUniverse(asset_dates)
# To avoid loading all CSV files in the directory, set the
# data source to load only those provided symbols
csv_dir = os.environ.get('QSTRADER_CSV_DATA_DIR', '.')
strategy_data_source = CSVDailyBarDataSource(csv_dir, Equity, csv_symbols=strategy_symbols)
strategy_data_handler = BacktestDataHandler(strategy_universe, data_sources=[strategy_data_source])
# Generate the signals (in this case holding-period return based
# momentum) used in the top-N momentum alpha model
momentum = MomentumSignal(start_dt, strategy_universe, lookbacks=[mom_lookback])
signals = SignalsCollection({'momentum': momentum}, strategy_data_handler)
# Generate the alpha model instance for the top-N momentum alpha model
strategy_alpha_model = TopNMomentumAlphaModel(
signals, mom_lookback, mom_top_n, strategy_universe, strategy_data_handler
)
# Construct the strategy backtest and run it
strategy_backtest = BacktestTradingSession(
start_dt,
end_dt,
strategy_universe,
strategy_alpha_model,
signals=signals,
rebalance='end_of_month',
long_only=True,
cash_buffer_percentage=0.01,
burn_in_dt=burn_in_dt,
data_handler=strategy_data_handler
)
strategy_backtest.run()
# Construct benchmark assets (buy & hold SPY)
benchmark_symbols = ['SPY']
benchmark_assets = ['EQ:SPY']
benchmark_universe = StaticUniverse(benchmark_assets)
benchmark_data_source = CSVDailyBarDataSource(csv_dir, Equity, csv_symbols=benchmark_symbols)
benchmark_data_handler = BacktestDataHandler(benchmark_universe, data_sources=[benchmark_data_source])
# Construct a benchmark Alpha Model that provides
# 100% static allocation to the SPY ETF, with no rebalance
benchmark_alpha_model = FixedSignalsAlphaModel({'EQ:SPY': 1.0})
benchmark_backtest = BacktestTradingSession(
burn_in_dt,
end_dt,
benchmark_universe,
benchmark_alpha_model,
rebalance='buy_and_hold',
long_only=True,
cash_buffer_percentage=0.01,
data_handler=benchmark_data_handler
)
benchmark_backtest.run()
# Performance Output
tearsheet = TearsheetStatistics(
strategy_equity=strategy_backtest.get_equity_curve(),
benchmark_equity=benchmark_backtest.get_equity_curve(),
title='US Sector Momentum - Top 3 Sectors'
)
tearsheet.plot_results()
| mit |
jreback/pandas | pandas/tests/frame/methods/test_reindex_like.py | 8 | 1187 | import numpy as np
import pytest
from pandas import DataFrame
import pandas._testing as tm
class TestDataFrameReindexLike:
def test_reindex_like(self, float_frame):
other = float_frame.reindex(index=float_frame.index[:10], columns=["C", "B"])
tm.assert_frame_equal(other, float_frame.reindex_like(other))
@pytest.mark.parametrize(
"method,expected_values",
[
("nearest", [0, 1, 1, 2]),
("pad", [np.nan, 0, 1, 1]),
("backfill", [0, 1, 2, 2]),
],
)
def test_reindex_like_methods(self, method, expected_values):
df = DataFrame({"x": list(range(5))})
result = df.reindex_like(df, method=method, tolerance=0)
tm.assert_frame_equal(df, result)
result = df.reindex_like(df, method=method, tolerance=[0, 0, 0, 0])
tm.assert_frame_equal(df, result)
def test_reindex_like_subclass(self):
# https://github.com/pandas-dev/pandas/issues/31925
class MyDataFrame(DataFrame):
pass
expected = DataFrame()
df = MyDataFrame()
result = df.reindex_like(expected)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
ahirner/py-cf-crawl | cf-parse.py | 1 | 10159 |
import sys
from pymongo import MongoClient
from bs4 import BeautifulSoup, element
import pandas as pd
MONGOPORT = 27017 #3001
OUT = "out"
if len(sys.argv) > 1: OUT = sys.argv[1]
LIMIT = 0
# In[2]:
class DictData(dict):
def insert(self, d, postprocess = None):
if d is None: return
for k,v in d.iteritems():
if v is not None:
res = v
if type(res) is list or type(res) is element.ResultSet:
                    res = [r for r in res if r is not None]
for i, r in enumerate(res):
self.insert({k+'_'+str(i) : r}, postprocess)
return
                # TODO: convert numeric strings to int where appropriate
if postprocess is not None:
res = postprocess(res)
#print k, type(res), res
if type(res) is element.Tag:
res = res.get_text(strip = True)
                elif isinstance(res, (str, unicode)): res = res.strip()
self[k] = res
def parse_detail(soup):
data = DictData()
sec = soup.find("div", class_ = ["basic_dtl", "founder_head"])
if sec is not None:
for f in sec.findAll("h5"):
spans = f.findAll("span")
if len(spans) == 3: data.insert({spans[0].get_text(strip=True) : spans[2]})
data.insert({'fav': sec.find("span", class_ = "cir_innertext_new")})
resp = sec.find("div", class_ = "r2_new")
if resp is not None:
data.insert({'responsiveness': resp.img}, lambda i: i['src'].split('/')[-1][:-4])
data.insert({'last_act_days': sec.find("span", class_ = "cir_today_txt_new")}, lambda t: 0 if t.get_text(strip=True).lower() == "today" else t)
sec = soup.find("div", class_ = "profile_status")
if sec is not None:
data.insert({'status' : sec.find("span","status_text")}, lambda s: s.b)
sec = soup.find("div", class_ = "other_advising")
if sec is not None:
data.insert({'interest' : sec.findAll("span", class_="single")})
sec = soup.find("div", class_ = "section_box about")
if sec is not None:
data.insert({'about' : sec.find("div", class_="show")}, lambda t: unicode(t.get_text()))
sec = soup.find("div", class_ = "section_box skills")
if sec is not None:
left = sec.find("div", class_="prof_skill_uhave_col")
right = sec.find("div", class_="prof_skill_uhave_col_right")
if left is not None:
for l,r in zip(left.findAll("div", class_="prof_skill_row"), right.findAll("div", "prof_skill_row")):
skill = l.find("label", class_="prof_sk_txt").get_text(strip=True).replace(" ","_")
have = 3 - len(l.select('span.deactive'))
need = 3 - len(r.select('span.deactive'))
data['skill_'+skill+'_has'] = have
data['skill_'+skill+'_need'] = need
data.insert({'skill_other' : sec.findAll("span", class_="single")})
secs = soup.findAll("div", class_ = "section_box experience")
for sec in secs:
if sec.find(class_="icon_experience") is not None:
exp_work = sec.find("table", class_="exp-list")
if exp_work is not None:
for i, w in enumerate(exp_work.findAll("tr")):
items = w.findAll("td")
data['exp_work_'+str(i)] = items[0].get_text(strip=True)
data['exp_work_'+str(i)+'_y'] = items[2].get_text(strip=True)
for area in sec.findAll("div", class_="show mar_top15"):
first_p = area.p.get_text(strip=True).lower()
if first_p == "areas of expertise":
data.insert({'exp_expertise' : area.findAll("span", class_="expertise_col")})
elif first_p == "looking expertise":
data.insert({'exp_expertise_look' : area.findAll("span", class_="expertise_col")})
elif first_p == "startup experience":
for se in area.findAll(class_="startup_Col"):
data.insert({'exp_startup_' + se.find("h5").get_text(strip=True).replace(" ","_") : se.find("h2", class_="startup_txt")})
elif first_p == "adviser experience":
for se in area.findAll(class_="startup_Col"):
data.insert({'exp_advisor_' + se.find("h5").get_text(strip=True).replace(" ","_") : se.find("h2", class_="startup_txt")})
elif first_p == "key accomplishments":
data.insert({'exp_key_accomplishments' : area.findAll("p")[1]}, lambda t: unicode(t.get_text()))
if sec.find(class_="icon_certifications") is not None:
data.insert({"exp_certification" : sec.findAll("p")})
if sec.find(class_="icon_accelerators") is not None:
data.insert({"exp_accelerator" : sec.findAll("p")})
sec = soup.find("div", class_ = "section_box looking_for")
if sec is not None:
#This part is for advisors
data.insert({'looking_compensation_offer' : sec.find("p")})
#data.insert({'looking_comp_cash' : sec.find(class_="looking_icon_01_enable")}, lambda l: l is not None)
#data.insert({'looking_comp_share' : sec.find(class_="looking_icon_02_enable")}, lambda l: l is not None)
#data.insert({'looking_comp_both' : sec.find(class_="looking_icon_03_enable")}, lambda l: l is not None)
data.insert({'looking_compensation' : sec.find("p", class_="cntr")}, lambda c: c.b)
#Others
for i, wc in enumerate(sec.findAll(class_="weeklyCmt")):
if i == 0:
if wc.span: data.insert({'looking_weekly_h_commit' : wc.span}, lambda c: c.get_text(strip=True).replace("Weekly commitmentof ", "").replace(" hours per week",""))
if i == 1:
if wc.span: data.insert({'looking_reward_in_return' : wc.span})
bs = sec.find("div", class_="business_stage_tabs")
if bs is not None:
for stage in bs.findAll("span", class_="active"):
#zero indexing is hacky: breaks if class label sequence is changed
data.insert({'looking_business_stage' : stage['class'][0]}, lambda s: s.replace("_icon",""))
sec = soup.find("div", class_ = "section_box education")
if sec is not None:
i = 0
for sb in sec.findAll("div", class_="section_box_inner"):
l = sb.find("b", class_="f_left")
r = sb.find("span", class_="right_txt")
paragraph = sb.find("p")
data.insert({'edu_school_'+str(i) : l})
data.insert({'edu_school_'+str(i)+'_y' : r})
data.insert({'edu_school_'+str(i)+'_summary' : paragraph}, lambda p: p.get_text(strip=False).strip())
i += 1
sec = soup.find("div", class_ = "section_box archetype")
if sec is not None:
data.insert({'archetype_desc' : sec.h3})
content = sec.find("div", class_="archetype_cont")
if content is not None:
            # presume strengths in [0] and weaknesses in [1]
s, w = content.findAll("ul")
data.insert({'archetype_strength' : s.findAll("li")})
data.insert({'archetype_weakness' : w.findAll("li")})
sec = soup.find("div", class_ = "section_box agegroup")
if sec is not None:
lu = ["<25", "25-35", "36-55", "55+"]
data.insert({'age' : sec.find("img")}, lambda i : lu[int(i['src'].replace("level_", "").split('/')[-1][:-4])-1])
sec = soup.find("div", class_ = "section_box social1")
if sec is not None:
s_classes = ['linkedin', 'facebook', 'twitter', 'meetup']
s_trues = [False] * len(s_classes)
for sp in sec.findAll("a", class_="social_profiles"):
t = sp.find("h4").get_text(strip=True).lower()
if t is not None:
if t in s_classes: s_trues[s_classes.index(t)] = True
                else: print "Error: new social profile found (%s)" % t
for i, v in enumerate(s_trues): data.insert({'social_'+ s_classes[i] : s_trues[i]})
sec = soup.find("div", class_ = "section_box video")
data.insert({'video' : True if sec is not None else False})
sec = soup.find("div", class_ = "events2")
if sec is not None:
i = 0
for e, d in zip(sec.findAll("div", class_="event_Col hov relative"), sec.findAll("div", class_="event_Col sel_Col")):
data.insert({'event_'+str(i) : e.find("a")})
data.insert({'event_'+str(i)+'_desc' : d.find("h5").contents[2].strip()})
i += 1
return data
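# Illustrative sketch (assumptions: `html` holds a saved CoFoundersLab profile
# page, and the key names shown are hypothetical instances of the fields built
# above):
# >>> data = parse_detail(BeautifulSoup(html, "lxml"))
# >>> data['skill_Marketing_has'], data.get('age'), data.get('video')
# (2, '25-35', True)
# The result is a flat DictData, which write_results() below pushes to MongoDB
# via a $set update.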
# In[13]:
db = MongoClient('localhost', MONGOPORT).cf_crawl
profiles = db.cf_profiles
#profiles.create_index("id", unique=True)
p_cursor = profiles.find({'detail_raw' : {"$exists" : True}})
if LIMIT: p_cursor = p_cursor.limit(LIMIT)
print "Processing %i profiles" % p_cursor.count()
def write_results(p_url, upd):
data = {"$set" : upd}
result = profiles.update_one({'url': p_url}, data, upsert=False)
return result
#import pandas as pd
#full_data = []
for p in p_cursor:
try:
data = parse_detail(BeautifulSoup(p['detail_raw'], "lxml"))
w = write_results(p['url'], data)
#p['name'] = p['name'].strip()
print "(%s) processed for %i fields, mod %i" % ("http://www.cofounderslab.com"+p['url'], len(data), w.modified_count)
#mini sanity check
if len(data) == 0: print "ERROR, retrieved 0 fields"
#if len(data) < 128:
# del p['detail_raw']
# data.update(p)
# full_data.append(p)
except KeyboardInterrupt:
print "Interrupted .. last url: " + "http://www.cofounderslab.com"+p['url']
break
# In[8]:
"""p_cursor = profiles.find({'detail_raw' : {"$exists" : True}})
print "Processing %i profiles" % p_cursor.count()
#p = p_cursor.()
for p in p_cursor:
if 'age' in p: print p['age']
else: print p['url']"""
# In[15]:
#df = pd.DataFrame(full_data)
#print "saving "+str(len(df))+" rows to "+OUT
#df.to_excel(OUT+".xls")
#df.to_csv(OUT+".csv", encoding="utf-8")
| mit |
gpldecha/non-parametric-regression | examples/python/lwr_2D_example1.py | 1 | 1323 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 30 14:01:20 2017
@author: guillaume
"""
import sys
import numpy as np
from pynpr.pylwr import *
import matplotlib.pyplot as plt
def plot_2D_LWR(xx,yy,zz,title='LWR'):
fig = plt.figure(facecolor='white')
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
CS = plt.contourf(xx,yy,zz,cmap=plt.cm.seismic)
plt.axis('equal')
cbar = plt.colorbar(CS)
plt.title(title,fontsize=20)
return CS
#%% EXAMPLE 3 (2 D)
lwr_opts = lwr_options()
lwr_opts.D = [1, 1]
lwr_opts.K = 50
lwr_opts.k_bias = 50
lwr_opts.y_bias = -1
lwr_opts.bUseKDT = True
lwr_opts.print_param()
xx1, yy1 = np.meshgrid(np.arange(-5, 5, 0.1), np.arange(-5, 5, 0.1), sparse=False)
shape1 = xx1.shape
y = np.sin(2 * xx1) * np.cos(2 * yy1)
y = y.flatten(1)
X = np.vstack((xx1.flatten(1),yy1.flatten(1)))
idx = np.sqrt(np.sum(X**2,axis=0)) > 4
y[idx] = 0
xx2, yy2 = np.meshgrid(np.arange(-5, 5, 0.2), np.arange(-5, 5, 0.2), sparse=False)
Xq = np.vstack((xx2.flatten(1),yy2.flatten(1)))
shape2 = xx2.shape
#%%
lwr = LWR(lwr_opts)
lwr.train(X,y)
yq = lwr.predict(Xq)
#%% Plot
plt.close('all')
plot_2D_LWR(xx1,yy1,y.reshape(shape1),'Training data')
plot_2D_LWR(xx2,yy2,yq.reshape(shape2),'Test data')
| mit |
matk86/pymatgen | pymatgen/analysis/transition_state.py | 1 | 12480 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import os
import glob
import numpy as np
from monty.json import jsanitize
from monty.json import MSONable
scipy_old_piecewisepolynomial = True
try:
from scipy.interpolate import PiecewisePolynomial
except ImportError:
from scipy.interpolate import CubicSpline
scipy_old_piecewisepolynomial = False
from pymatgen.util.plotting import pretty_plot
from pymatgen.io.vasp import Poscar, Outcar
"""
Some reimplementation of Henkelman's Transition State Analysis utilities,
which are originally in Perl. Additional features beyond those offered by
Henkelman's utilities will be added.
This allows the usage and customization in Python.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = '[email protected]'
__date__ = '6/1/15'
class NEBAnalysis(MSONable):
"""
An NEBAnalysis class.
"""
def __init__(self, r, energies, forces, structures, spline_options=None):
"""
Initializes an NEBAnalysis from the cumulative root mean squared distances
between structures, the energies, the forces, the structures and the
interpolation_order for the analysis.
Args:
r: Root mean square distances between structures
energies: Energies of each structure along reaction coordinate
forces: Tangent forces along the reaction coordinate.
structures ([Structure]): List of Structures along reaction
coordinate.
spline_options (dict): Options for cubic spline. For example,
{"saddle_point": "zero_slope"} forces the slope at the saddle to
be zero.
"""
self.r = np.array(r)
self.energies = np.array(energies)
self.forces = np.array(forces)
self.structures = structures
self.spline_options = spline_options if spline_options is not None \
else {}
# We do a piecewise interpolation between the points. Each spline (
# cubic by default) is constrained by the boundary conditions of the
# energies and the tangent force, i.e., the derivative of
# the energy at each pair of points.
self.setup_spline(spline_options=self.spline_options)
def setup_spline(self, spline_options=None):
"""
Setup of the options for the spline interpolation
Args:
spline_options (dict): Options for cubic spline. For example,
{"saddle_point": "zero_slope"} forces the slope at the saddle to
be zero.
"""
self.spline_options = spline_options
relative_energies = self.energies - self.energies[0]
if scipy_old_piecewisepolynomial:
if self.spline_options:
raise RuntimeError('Option for saddle point not available with'
'old scipy implementation')
self.spline = PiecewisePolynomial(
self.r, np.array([relative_energies, -self.forces]).T,
orders=3)
else:
# New scipy implementation for scipy > 0.18.0
if self.spline_options.get('saddle_point', '') == 'zero_slope':
imax = np.argmax(relative_energies)
self.spline = CubicSpline(x=self.r[:imax + 1],
y=relative_energies[:imax + 1],
bc_type=((1, 0.0), (1, 0.0)))
cspline2 = CubicSpline(x=self.r[imax:], y=relative_energies[imax:],
bc_type=((1, 0.0), (1, 0.0)))
self.spline.extend(c=cspline2.c, x=cspline2.x[1:])
else:
self.spline = CubicSpline(x=self.r, y=relative_energies,
bc_type=((1, 0.0), (1, 0.0)))
@classmethod
def from_outcars(cls, outcars, structures, **kwargs):
"""
Initializes an NEBAnalysis from Outcar and Structure objects. Use
the static constructors, e.g., :class:`from_dir` instead if you
prefer to have these automatically generated from a directory of NEB
calculations.
Args:
outcars ([Outcar]): List of Outcar objects. Note that these have
to be ordered from start to end along reaction coordinates.
structures ([Structure]): List of Structures along reaction
coordinate. Must be same length as outcar.
interpolation_order (int): Order of polynomial to use to
interpolate between images. Same format as order parameter in
                scipy.interpolate.PiecewisePolynomial.
"""
if len(outcars) != len(structures):
raise ValueError("# of Outcars must be same as # of Structures")
# Calculate cumulative root mean square distance between structures,
# which serves as the reaction coordinate. Note that these are
# calculated from the final relaxed structures as the coordinates may
# have changed from the initial interpolation.
r = [0]
prev = structures[0]
for st in structures[1:]:
dists = np.array([s2.distance(s1) for s1, s2 in zip(prev, st)])
r.append(np.sqrt(np.sum(dists ** 2)))
prev = st
r = np.cumsum(r)
energies = []
forces = []
for i, o in enumerate(outcars):
o.read_neb()
energies.append(o.data["energy"])
if i in [0, len(outcars) - 1]:
forces.append(0)
else:
forces.append(o.data["tangent_force"])
forces = np.array(forces)
r = np.array(r)
return cls(r=r, energies=energies, forces=forces,
structures=structures, **kwargs)
def get_extrema(self, normalize_rxn_coordinate=True):
"""
Returns the positions of the extrema along the MEP. Both local
minimums and maximums are returned.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
Returns:
(min_extrema, max_extrema), where the extrema are given as
[(x1, y1), (x2, y2), ...].
"""
x = np.arange(0, np.max(self.r), 0.01)
y = self.spline(x) * 1000
scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
min_extrema = []
max_extrema = []
for i in range(1, len(x) - 1):
if y[i] < y[i-1] and y[i] < y[i+1]:
min_extrema.append((x[i] * scale, y[i]))
elif y[i] > y[i-1] and y[i] > y[i+1]:
max_extrema.append((x[i] * scale, y[i]))
return min_extrema, max_extrema
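    # Illustrative sketch (hypothetical numbers; `neb` stands for an
    # NEBAnalysis instance). With the default normalization the x values lie
    # in [0, 1] and the y values are in meV, as computed above. For a
    # single-barrier path with no intermediate minimum:
    # >>> neb.get_extrema()
    # ([], [(0.48, 612.3)])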
def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):
"""
Returns the NEB plot. Uses Henkelman's approach of spline fitting
each section of the reaction path based on tangent force and energies.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
label_barrier (bool): Whether to label the maximum barrier.
Returns:
matplotlib.pyplot object.
"""
plt = pretty_plot(12, 8)
scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
x = np.arange(0, np.max(self.r), 0.01)
y = self.spline(x) * 1000
relative_energies = self.energies - self.energies[0]
plt.plot(self.r * scale, relative_energies * 1000, 'ro',
x * scale, y, 'k-', linewidth=2, markersize=10)
plt.xlabel("Reaction coordinate")
plt.ylabel("Energy (meV)")
plt.ylim((np.min(y) - 10, np.max(y) * 1.02 + 20))
if label_barrier:
data = zip(x * scale, y)
barrier = max(data, key=lambda d: d[1])
plt.plot([0, barrier[0]], [barrier[1], barrier[1]], 'k--')
plt.annotate('%.0f meV' % barrier[1],
xy=(barrier[0] / 2, barrier[1] * 1.02),
xytext=(barrier[0] / 2, barrier[1] * 1.02),
horizontalalignment='center')
plt.tight_layout()
return plt
@classmethod
def from_dir(cls, root_dir, relaxation_dirs=None, **kwargs):
"""
Initializes a NEBAnalysis object from a directory of a NEB run.
Note that OUTCARs must be present in all image directories. For the
terminal OUTCARs from relaxation calculations, you can specify the
locations using relaxation_dir. If these are not specified, the code
will attempt to look for the OUTCARs in 00 and 0n directories,
followed by subdirs "start", "end" or "initial", "final" in the
root_dir. These are just some typical conventions used
preferentially in Shyue Ping's MAVRL research group. For the
non-terminal points, the CONTCAR is read to obtain structures. For
terminal points, the POSCAR is used. The image directories are
assumed to be the only directories that can be resolved to integers.
E.g., "00", "01", "02", "03", "04", "05", "06". The minimum
sub-directory structure that can be parsed is of the following form (
a 5-image example is shown):
00:
- POSCAR
- OUTCAR
01, 02, 03, 04, 05:
- CONTCAR
- OUTCAR
06:
- POSCAR
- OUTCAR
Args:
root_dir (str): Path to the root directory of the NEB calculation.
relaxation_dirs (tuple): This specifies the starting and ending
relaxation directories from which the OUTCARs are read for the
terminal points for the energies.
Returns:
NEBAnalysis object.
"""
neb_dirs = []
for d in os.listdir(root_dir):
pth = os.path.join(root_dir, d)
if os.path.isdir(pth) and d.isdigit():
i = int(d)
neb_dirs.append((i, pth))
neb_dirs = sorted(neb_dirs, key=lambda d: d[0])
outcars = []
structures = []
# Setup the search sequence for the OUTCARs for the terminal
# directories.
terminal_dirs = []
if relaxation_dirs is not None:
terminal_dirs.append(relaxation_dirs)
terminal_dirs.append((neb_dirs[0][1], neb_dirs[-1][1]))
terminal_dirs.append([os.path.join(root_dir, d)
for d in ["start", "end"]])
terminal_dirs.append([os.path.join(root_dir, d)
for d in ["initial", "final"]])
for i, d in neb_dirs:
outcar = glob.glob(os.path.join(d, "OUTCAR*"))
contcar = glob.glob(os.path.join(d, "CONTCAR*"))
poscar = glob.glob(os.path.join(d, "POSCAR*"))
terminal = i == 0 or i == neb_dirs[-1][0]
if terminal:
for ds in terminal_dirs:
od = ds[0] if i == 0 else ds[1]
outcar = glob.glob(os.path.join(od, "OUTCAR*"))
if outcar:
outcar = sorted(outcar)
outcars.append(Outcar(outcar[-1]))
break
else:
raise ValueError("OUTCAR cannot be found for terminal "
"point %s" % d)
structures.append(Poscar.from_file(poscar[0]).structure)
else:
outcars.append(Outcar(outcar[0]))
structures.append(Poscar.from_file(contcar[0]).structure)
return NEBAnalysis.from_outcars(outcars, structures, **kwargs)
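    # Illustrative usage sketch (the directory path is hypothetical; the
    # methods are those defined in this class):
    # >>> neb = NEBAnalysis.from_dir("/path/to/neb_run")
    # >>> plt = neb.get_plot()
    # >>> plt.savefig("neb_barrier.png")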
def as_dict(self):
"""
Dict representation of NEBAnalysis.
Returns:
JSON serializable dict representation.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
'r': jsanitize(self.r),
'energies': jsanitize(self.energies),
'forces': jsanitize(self.forces),
'structures': [s.as_dict() for s in self.structures]}
| mit |
LevinJ/SSD_tensorflow_VOC | check_images.py | 1 | 2645 | from datasets import pascalvoc_datasets
import tensorflow as tf
import matplotlib.pyplot as plt
import tensorflow.contrib.slim as slim
# from nets import nets_factory
from preprocessing import preprocessing_factory
import numpy as np
import cv2
from utility import visualization
from nets.ssd import g_ssd_model
from preprocessing.ssd_vgg_preprocessing import np_image_unwhitened
from preprocessing.ssd_vgg_preprocessing import preprocess_for_train
from preprocessing.ssd_vgg_preprocessing import preprocess_for_eval
import tf_utils
import math
from preparedata import PrepareData
class CheckImages(PrepareData):
def __init__(self):
PrepareData.__init__(self)
return
def list_images(self,sess, batch_data,target_object=1):
i = 0
num_batches = math.ceil(self.dataset.num_samples / float(self.batch_size))
target_filenames = []
num_bboxes = 0
while i < num_batches:
image, filename,glabels,gbboxes,gdifficults,gclasses, glocalisations, gscores = sess.run(list(batch_data))
pos_sample_inds = (glabels == target_object).nonzero()
num_bboxes += len(pos_sample_inds[0])
target_filenames.extend(list(filename[np.unique(pos_sample_inds[0])]))
i += 1
# print("number of matched image {} matched bboxes {} for {}, \n{}".format(len(target_filenames), num_bboxes, target_object, np.array(target_filenames)))
print("{}, number of matched image {} matched bboxes {}, ratio {}".format(target_object, len(target_filenames), num_bboxes, float(num_bboxes)/len(target_filenames)))
return
def run(self):
with tf.Graph().as_default():
# batch_data= self.get_voc_2007_train_data(is_training_data=False)
# batch_data = self.get_voc_2007_test_data()
# batch_data = self.get_voc_2012_train_data()
batch_data = self.get_voc_2007_2012_train_data(is_training_data = False)
# return self.iterate_file_name(batch_data)
with tf.Session('') as sess:
init = tf.global_variables_initializer()
sess.run(init)
with slim.queues.QueueRunners(sess):
# target_object = 9
for target_object in np.arange(1,21):
self.list_images(sess, batch_data,target_object)
return
if __name__ == "__main__":
obj= CheckImages()
obj.run() | apache-2.0 |
simon-pepin/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
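# Minimal illustrative sketch (doctest-style): each row of B holds the weights
# that reconstruct X[i] from its neighbours Z[i] and sums to one.
# >>> X = np.array([[0., 0.], [1., 1.]])
# >>> Z = np.array([[[0., 1.], [1., 0.]], [[0., 0.], [2., 2.]]])
# >>> B = barycenter_weights(X, Z)
# >>> B.shape, bool(np.allclose(B.sum(axis=1), 1.0))
# ((2, 2), True)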
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
                   n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I on the diagonal: M = (I - W)' (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
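# Illustrative usage sketch (doctest-style; random data, so only the call
# signature and output shape are meaningful here):
# >>> rng = np.random.RandomState(0)
# >>> X = rng.rand(100, 5)
# >>> Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2)
# >>> Y.shape
# (100, 2)
# `err` is the reconstruction error ``norm(Y - W Y, 'fro')**2`` described above.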
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
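# Illustrative estimator usage sketch (doctest-style; random data, shape
# checks only):
# >>> rng = np.random.RandomState(42)
# >>> lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2)
# >>> lle.fit_transform(rng.rand(100, 5)).shape
# (100, 2)
# >>> lle.transform(rng.rand(10, 5)).shape
# (10, 2)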
| bsd-3-clause |
flightgong/scikit-learn | sklearn/neural_network/rbm.py | 2 | 11972 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import atleast2d_or_csr, check_arrays
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hiddens. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
`intercept_hidden_` : array-like, shape (n_components,)
Biases of the hidden units.
`intercept_visible_` : array-like, shape (n_features,)
Biases of the visible units.
`components_` : array-like, shape (n_components, n_features)
Weight matrix, where n_features in the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
X, = check_arrays(X, sparse_format='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
rng = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, rng)
v_ = self._sample_visibles(h_, rng)
return v_
def partial_fit(self, X):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X, = check_arrays(X, sparse_format='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
v = atleast2d_or_csr(X)
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
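    # Illustrative sketch (doctest-style; exact values are omitted because, as
    # noted above, this method is stochastic):
    # >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    # >>> rbm = BernoulliRBM(n_components=2, n_iter=5, random_state=0).fit(X)
    # >>> rbm.score_samples(X).shape
    # (4,)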
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X, = check_arrays(X, sparse_format='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
| bsd-3-clause |
adamgreenhall/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
katstalk/android_external_chromium_org | chrome/test/nacl_test_injection/buildbot_nacl_integration.py | 61 | 2538 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows
# but it is not clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
script_dir = os.path.dirname(os.path.abspath(__file__))
nacl_integration_script = os.path.join(script_dir,
'buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
mlperf/inference_results_v0.7 | closed/QCT/code/dlrm-99/tensorrt/infer.py | 18 | 4431 | #! /usr/bin/env python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import ctypes
sys.path.insert(0, os.getcwd())
# The plugin .so file has to be loaded at global scope and before `import torch` to avoid cuda version mismatch.
DLRM_INTERACTIONS_PLUGIN_LIBRARY="build/plugins/DLRMInteractionsPlugin/libdlrminteractionsplugin.so"
if not os.path.isfile(DLRM_INTERACTIONS_PLUGIN_LIBRARY):
raise IOError("{}\n{}\n".format(
"Failed to load library ({}).".format(DLRM_INTERACTIONS_PLUGIN_LIBRARY),
"Please build the DLRM Interactions plugin."
))
ctypes.CDLL(DLRM_INTERACTIONS_PLUGIN_LIBRARY)
DLRM_BOTTOM_MLP_PLUGIN_LIBRARY="build/plugins/DLRMBottomMLPPlugin/libdlrmbottommlpplugin.so"
if not os.path.isfile(DLRM_BOTTOM_MLP_PLUGIN_LIBRARY):
raise IOError("{}\n{}\n".format(
"Failed to load library ({}).".format(DLRM_BOTTOM_MLP_PLUGIN_LIBRARY),
"Please build the DLRM Bottom MLP plugin."
))
ctypes.CDLL(DLRM_BOTTOM_MLP_PLUGIN_LIBRARY)
from code.common.runner import EngineRunner, get_input_format
from code.common import logging
import code.common.arguments as common_args
import json
import numpy as np
from sklearn.metrics import roc_auc_score
import tensorrt as trt
import torch
import time
def evaluate(ground_truths, predictions):
assert len(ground_truths) == len(predictions), "Number of ground truths differs from number of predictions"
return roc_auc_score(ground_truths, predictions)
def run_dlrm_accuracy(engine_file, batch_size, num_pairs=10000000, verbose=False):
if verbose:
logging.info("Running DLRM accuracy test with:")
logging.info(" engine_file: {:}".format(engine_file))
logging.info(" batch_size: {:}".format(batch_size))
logging.info(" num_pairs: {:}".format(num_pairs))
runner = EngineRunner(engine_file, verbose=verbose)
pair_dir = os.path.join(os.getenv("PREPROCESSED_DATA_DIR", "build/preprocessed_data"), "criteo", "full_recalib")
input_dtype, input_format = get_input_format(runner.engine)
if input_dtype == trt.DataType.FLOAT:
format_string = "fp32"
elif input_dtype == trt.DataType.HALF:
format_string = "fp16"
elif input_dtype == trt.DataType.INT8:
format_string = "int8"
if input_format == trt.TensorFormat.CHW4:
format_string += "_chw4"
else:
raise NotImplementedError("Unsupported DataType {:}".format(input_dtype))
numerical_inputs = np.load(os.path.join(pair_dir, "numeric_{:}.npy".format(format_string)))
categ_inputs = np.load(os.path.join(pair_dir, "categorical_int32.npy"))
predictions = []
refs = []
batch_idx = 0
for pair_idx in range(0, int(num_pairs), batch_size):
actual_batch_size = batch_size if pair_idx + batch_size <= num_pairs else num_pairs - pair_idx
numerical_input = np.ascontiguousarray(numerical_inputs[pair_idx:pair_idx + actual_batch_size])
categ_input = np.ascontiguousarray(categ_inputs[pair_idx:pair_idx + actual_batch_size])
start_time = time.time()
outputs = runner([numerical_input, categ_input], actual_batch_size)
if verbose:
logging.info("Batch {:d} (Size {:}) >> Inference time: {:f}".format(batch_idx, actual_batch_size, time.time() - start_time))
predictions.extend(outputs[0][:actual_batch_size])
batch_idx += 1
ground_truths = np.load(os.path.join(pair_dir, "ground_truth.npy"))[:num_pairs].tolist()
return evaluate(ground_truths, predictions)
def main():
args = common_args.parse_args(common_args.ACCURACY_ARGS)
logging.info("Running accuracy test...")
acc = run_dlrm_accuracy(args["engine_file"], args["batch_size"], args["num_samples"],
verbose=args["verbose"])
logging.info("Accuracy: {:}".format(acc))
if __name__ == "__main__":
main()
| apache-2.0 |
diegocavalca/Studies | phd-thesis/nilmtk/nilmtk/dataset_converters/eco/convert_eco.py | 1 | 8200 | from __future__ import print_function
import pandas as pd
import numpy as np
import sys
from os import listdir, getcwd
from os.path import isdir, join, dirname, abspath
from pandas import concat
from nilmtk.utils import get_module_directory, check_directory_exists
from nilmtk.datastore import Key
from nilmtk.measurement import LEVEL_NAMES
from nilm_metadata import convert_yaml_to_hdf5
"""
DATASET STRUCTURE:
------------------
After extracting all the dataset archives, we should arrive at a directory structure similar to the
one described below.
ECO Dataset will have a folder '<i>_sm_csv' and '<i>_plug_csv' where i is the building no.
Originally, the expected folder structure was:
- <i>_sm_csv has a folder <i>
- <i>_plug_csv has folders 01, 02,....<n> where n is the plug numbers.
This version also supports the following structure, which can be created by unpacking the
ZIP files uniformly, creating a folder for each one:
- <i>_sm_csv has a folder <i>
- <i>_plug_csv has a folder <i>, and <i>_plug_csv/<i> has folders 01, 02,....<n>,
where n is the plug numbers.
Each folder has a CSV file as per each day, with each day csv file containing
86400 entries.
"""
plugs_column_name = {1: ('power', 'active')}
def convert_eco(dataset_loc, hdf_filename, timezone):
"""
Parameters:
-----------
dataset_loc: str
The root directory where the dataset is located.
hdf_filename: str
Path of the output HDF5 file. The path has to include the
HDF5 file name for the converter to work.
timezone: str
specifies the timezone of the dataset.
"""
# Creating a new HDF File
store = pd.HDFStore(hdf_filename, 'w', complevel=9, complib='blosc')
check_directory_exists(dataset_loc)
directory_list = [i for i in listdir(dataset_loc) if '.txt' not in i]
directory_list.sort()
print(directory_list)
found_any_sm = False
found_any_plug = False
# Traversing every folder
for folder in directory_list:
if folder[0] == '.' or folder[-3:] == '.h5':
print('Skipping ', folder)
continue
#Building number and meter_flag
building_no = int(folder[:2])
meter_flag = None
if 'sm_csv' in folder:
meter_flag = 'sm'
elif 'plugs' in folder:
meter_flag = 'plugs'
else:
print('Skipping folder', folder)
continue
print('Computing for folder', folder)
dir_list = [i for i in listdir(join(dataset_loc, folder)) if isdir(join(dataset_loc,folder,i))]
dir_list.sort()
if meter_flag == 'plugs' and len(dir_list) < 3:
# Try harder to find the subfolders
folder = join(folder, folder[:2])
dir_list = [i for i in listdir(join(dataset_loc, folder)) if isdir(join(dataset_loc,folder,i))]
print('Current dir list:', dir_list)
for fl in dir_list:
print('Computing for folder ', fl)
fl_dir_list = [i for i in listdir(join(dataset_loc,folder,fl)) if '.csv' in i]
fl_dir_list.sort()
if meter_flag == 'sm':
for fi in fl_dir_list:
found_any_sm = True
df = pd.read_csv(join(dataset_loc,folder,fl,fi), names=[i for i in range(1,17)], dtype=np.float32)
for phase in range(1,4):
key = str(Key(building=building_no, meter=phase))
df_phase = df.loc[:,[1+phase, 5+phase, 8+phase, 13+phase]]
# get reactive power
power = df_phase.loc[:, (1+phase, 13+phase)].values
reactive = power[:,0] * np.tan(power[:,1] * np.pi / 180)
df_phase['Q'] = reactive
df_phase.index = pd.date_range(start=fi[:-4], freq='s', periods=86400, tz='GMT')
df_phase = df_phase.tz_convert(timezone)
sm_column_name = {
1+phase:('power', 'active'),
5+phase:('current', ''),
8+phase:('voltage', ''),
13+phase:('phase_angle', ''),
'Q': ('power', 'reactive'),
}
df_phase.columns = pd.MultiIndex.from_tuples([
sm_column_name[col] for col in df_phase.columns
])
power_active = df_phase['power', 'active']
tmp_before = np.size(power_active)
df_phase = df_phase[power_active != -1]
power_active = df_phase['power', 'active']
tmp_after = np.size(power_active)
if tmp_before != tmp_after:
print('Removed missing measurements - Size before: ' + str(tmp_before) + ', size after: ' + str(tmp_after))
df_phase.columns.set_names(LEVEL_NAMES, inplace=True)
if not key in store:
store.put(key, df_phase, format='Table')
else:
store.append(key, df_phase, format='Table')
store.flush()
print('Building', building_no, ', Meter no.', phase,
'=> Done for ', fi[:-4])
else:
#Meter number to be used in key
meter_num = int(fl) + 3
key = str(Key(building=building_no, meter=meter_num))
current_folder = join(dataset_loc,folder,fl)
if not fl_dir_list:
raise RuntimeError("No CSV file found in " + current_folder)
#Getting dataframe for each csv file separately
for fi in fl_dir_list:
found_any_plug = True
df = pd.read_csv(join(current_folder, fi), names=[1], dtype=np.float64)
df.index = pd.date_range(start=fi[:-4].replace('.', ':'), freq='s', periods=86400, tz='GMT')
df.columns = pd.MultiIndex.from_tuples(plugs_column_name.values())
df = df.tz_convert(timezone)
df.columns.set_names(LEVEL_NAMES, inplace=True)
tmp_before = np.size(df.power.active)
df = df[df.power.active != -1]
tmp_after = np.size(df.power.active)
if (tmp_before != tmp_after):
print('Removed missing measurements - Size before: ' + str(tmp_before) + ', size after: ' + str(tmp_after))
# If table not present in hdf5, create or else append to existing data
if not key in store:
store.put(key, df, format='Table')
print('Building',building_no,', Meter no.',meter_num,'=> Done for ',fi[:-4])
else:
store.append(key, df, format='Table')
store.flush()
print('Building',building_no,', Meter no.',meter_num,'=> Done for ',fi[:-4])
if not found_any_plug or not found_any_sm:
raise RuntimeError('The files were not found! Please check the folder structure. Extract each ZIP file into a folder with its base name (e.g. extract "01_plugs_csv.zip" into a folder named "01_plugs_csv", etc.)')
print("Data storage completed.")
store.close()
# Adding the metadata to the HDF5file
print("Proceeding to Metadata conversion...")
meta_path = join(
get_module_directory(),
'dataset_converters',
'eco',
'metadata'
)
convert_yaml_to_hdf5(meta_path, hdf_filename)
print("Completed Metadata conversion.")
| cc0-1.0 |
wbinventor/openmc | openmc/data/resonance_covariance.py | 10 | 27136 | from collections.abc import MutableSequence
import warnings
import io
import copy
import numpy as np
import pandas as pd
from . import endf
import openmc.checkvalue as cv
from .resonance import Resonances
def _add_file2_contributions(file32params, file2params):
"""Function for aiding in adding resonance parameters from File 2 that are
not always present in File 32. Uses already imported resonance data.
Parameters
----------
file32params : pandas.DataFrame
Incomplete set of resonance parameters contained in File 32.
file2params : pandas.DataFrame
Resonance parameters from File 2. Ordered by energy.
Returns
-------
parameters : pandas.DataFrame
Complete set of parameters ordered by L-values and then energy
"""
# Use l-values and competitiveWidth from File 2 data
# Re-sort File 2 by energy to match File 32
file2params = file2params.sort_values(by=['energy'])
file2params.reset_index(drop=True, inplace=True)
# Sort File 32 parameters by energy as well (maintaining index)
file32params.sort_values(by=['energy'], inplace=True)
# Add in values (.values converts to array first to ignore index)
file32params['L'] = file2params['L'].values
if 'competitiveWidth' in file2params.columns:
file32params['competitiveWidth'] = file2params['competitiveWidth'].values
# Resort to File 32 order (by L then by E) for use with covariance
file32params.sort_index(inplace=True)
return file32params
class ResonanceCovariances(Resonances):
"""Resolved resonance covariance data
Parameters
----------
ranges : list of openmc.data.ResonanceCovarianceRange
Distinct energy ranges for resonance data
Attributes
----------
ranges : list of openmc.data.ResonanceCovarianceRange
Distinct energy ranges for resonance data
"""
@property
def ranges(self):
return self._ranges
@ranges.setter
def ranges(self, ranges):
cv.check_type('resonance ranges', ranges, MutableSequence)
self._ranges = cv.CheckedList(ResonanceCovarianceRange,
'resonance range', ranges)
@classmethod
def from_endf(cls, ev, resonances):
"""Generate resonance covariance data from an ENDF evaluation.
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
resonances : openmc.data.Resonances object
openmc.data.Resonances object generated from the same evaluation
used to import values not contained in File 32
Returns
-------
openmc.data.ResonanceCovariances
Resonance covariance data
"""
file_obj = io.StringIO(ev.section[32, 151])
# Determine whether discrete or continuous representation
items = endf.get_head_record(file_obj)
n_isotope = items[4] # Number of isotopes
ranges = []
for iso in range(n_isotope):
items = endf.get_cont_record(file_obj)
abundance = items[1]
fission_widths = (items[3] == 1) # Flag for fission widths
n_ranges = items[4] # Number of resonance energy ranges
for j in range(n_ranges):
items = endf.get_cont_record(file_obj)
# Unresolved flags - 0: only scattering radius given
# 1: resolved parameters given
# 2: unresolved parameters given
unresolved_flag = items[2]
formalism = items[3] # resonance formalism
# Throw error for unsupported formalisms
if formalism in [0, 7]:
error = 'LRF='+str(formalism)+' covariance not supported '\
'for this formalism'
raise NotImplementedError(error)
if unresolved_flag in (0, 1):
# Resolved resonance region
resonance = resonances.ranges[j]
erange = _FORMALISMS[formalism].from_endf(ev, file_obj,
items, resonance)
ranges.append(erange)
elif unresolved_flag == 2:
warn = 'Unresolved resonance not supported. Covariance '\
'values for the unresolved region not imported.'
warnings.warn(warn)
return cls(ranges)
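# Illustrative usage (added note; the file name is hypothetical):
# ev = endf.Evaluation('n-092_U_238.endf')
# resonances = Resonances.from_endf(ev)
# res_cov = ResonanceCovariances.from_endf(ev, resonances)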
class ResonanceCovarianceRange:
"""Resonace covariance range. Base class for different formalisms.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
mpar : int
Number of parameters in covariance matrix for each individual resonance
formalism : str
String descriptor of formalism
"""
def __init__(self, energy_min, energy_max):
self.energy_min = energy_min
self.energy_max = energy_max
def subset(self, parameter_str, bounds):
"""Produce a subset of resonance parameters and the corresponding
covariance matrix to an IncidentNeutron object.
Parameters
----------
parameter_str : str
parameter to filter on
(e.g. 'energy', 'captureWidth', 'fissionWidthA', ...)
bounds : np.array
[low numerical bound, high numerical bound]
Returns
-------
res_cov_range : openmc.data.ResonanceCovarianceRange
ResonanceCovarianceRange object that contains a subset of the
covariance matrix (upper triangular) as well as a subset parameters
within self.file2params
"""
# Copy range and prevent change of original
res_cov_range = copy.deepcopy(self)
parameters = self.file2res.parameters
cov = res_cov_range.covariance
mpar = res_cov_range.mpar
# Create mask
mask1 = parameters[parameter_str] >= bounds[0]
mask2 = parameters[parameter_str] <= bounds[1]
mask = mask1 & mask2
res_cov_range.parameters = parameters[mask]
indices = res_cov_range.parameters.index.values
# Build subset of covariance
sub_cov_dim = len(indices)*mpar
cov_subset_vals = []
for index1 in indices:
for i in range(mpar):
for index2 in indices:
for j in range(mpar):
if index2*mpar+j >= index1*mpar+i:
cov_subset_vals.append(cov[index1*mpar+i,
index2*mpar+j])
cov_subset = np.zeros([sub_cov_dim, sub_cov_dim])
tri_indices = np.triu_indices(sub_cov_dim)
cov_subset[tri_indices] = cov_subset_vals
res_cov_range.file2res.parameters = parameters[mask]
res_cov_range.covariance = cov_subset
return res_cov_range
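# Illustrative usage (added note; bounds are hypothetical): keep only resonances with
# energies between 1 eV and 1 keV:
# sub_range = cov_range.subset('energy', [1.0, 1.0e3])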
def sample(self, n_samples):
"""Sample resonance parameters based on the covariances provided
within an ENDF evaluation.
Parameters
----------
n_samples : int
The number of samples to produce
Returns
-------
samples : list of openmc.data.ResonanceCovarianceRange objects
List of samples size `n_samples`
"""
warn_str = 'Sampling routine does not guarantee positive values for '\
'parameters. This can lead to undefined behavior in the '\
'reconstruction routine.'
warnings.warn(warn_str)
parameters = self.parameters
cov = self.covariance
# Symmetrizing covariance matrix
cov = cov + cov.T - np.diag(cov.diagonal())
formalism = self.formalism
mpar = self.mpar
samples = []
# Handling MLBW/SLBW sampling
if formalism == 'mlbw' or formalism == 'slbw':
params = ['energy', 'neutronWidth', 'captureWidth', 'fissionWidth',
'competitiveWidth']
param_list = params[:mpar]
mean_array = parameters[param_list].values
mean = mean_array.flatten()
par_samples = np.random.multivariate_normal(mean, cov,
size=n_samples)
spin = parameters['J'].values
l_value = parameters['L'].values
for sample in par_samples:
energy = sample[0::mpar]
gn = sample[1::mpar]
gg = sample[2::mpar]
gf = sample[3::mpar] if mpar > 3 else parameters['fissionWidth'].values
gx = sample[4::mpar] if mpar > 4 else parameters['competitiveWidth'].values
gt = gn + gg + gf + gx
records = []
for j, E in enumerate(energy):
records.append([energy[j], l_value[j], spin[j], gt[j],
gn[j], gg[j], gf[j], gx[j]])
columns = ['energy', 'L', 'J', 'totalWidth', 'neutronWidth',
'captureWidth', 'fissionWidth', 'competitiveWidth']
sample_params = pd.DataFrame.from_records(records,
columns=columns)
# Copy ResonanceRange object
res_range = copy.copy(self.file2res)
res_range.parameters = sample_params
samples.append(res_range)
# Handling RM sampling
elif formalism == 'rm':
params = ['energy', 'neutronWidth', 'captureWidth',
'fissionWidthA', 'fissionWidthB']
param_list = params[:mpar]
mean_array = parameters[param_list].values
mean = mean_array.flatten()
par_samples = np.random.multivariate_normal(mean, cov,
size=n_samples)
spin = parameters['J'].values
l_value = parameters['L'].values
for sample in par_samples:
energy = sample[0::mpar]
gn = sample[1::mpar]
gg = sample[2::mpar]
gfa = sample[3::mpar] if mpar > 3 else parameters['fissionWidthA'].values
gfb = sample[4::mpar] if mpar > 4 else parameters['fissionWidthB'].values
records = []
for j, E in enumerate(energy):
records.append([energy[j], l_value[j], spin[j], gn[j],
gg[j], gfa[j], gfb[j]])
columns = ['energy', 'L', 'J', 'neutronWidth',
'captureWidth', 'fissionWidthA', 'fissionWidthB']
sample_params = pd.DataFrame.from_records(records,
columns=columns)
# Copy ResonanceRange object
res_range = copy.copy(self.file2res)
res_range.parameters = sample_params
samples.append(res_range)
return samples
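# Illustrative usage (added note): draw 100 correlated parameter sets; each entry of the
# returned list is a copy of the File 2 resonance range carrying one sampled parameter table:
# sampled_ranges = cov_range.sample(100)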
class MultiLevelBreitWignerCovariance(ResonanceCovarianceRange):
"""Multi-level Breit-Wigner resolved resonance formalism covariance data.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
mpar : int
Number of parameters in covariance matrix for each individual resonance
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
formalism : str
String descriptor of formalism
"""
def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res):
super().__init__(energy_min, energy_max)
self.parameters = parameters
self.covariance = covariance
self.mpar = mpar
self.lcomp = lcomp
self.file2res = copy.copy(file2res)
self.formalism = 'mlbw'
@classmethod
def from_endf(cls, ev, file_obj, items, resonance):
"""Create MLBW covariance data from an ENDF evaluation.
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
file_obj : file-like object
ENDF file positioned at the second record of a resonance range
subsection in MF=32, MT=151
items : list
Items from the CONT record at the start of the resonance range
subsection
resonance : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
Returns
-------
openmc.data.MultiLevelBreitWignerCovariance
Multi-level Breit-Wigner resonance covariance parameters
"""
# Read energy-dependent scattering radius if present
energy_min, energy_max = items[0:2]
nro, naps = items[4:6]
if nro != 0:
params, ape = endf.get_tab1_record(file_obj)
# Other scatter radius parameters
items = endf.get_cont_record(file_obj)
target_spin = items[0]
lcomp = items[3] # Flag for compatibility 0, 1, 2 - 2 is compact form
nls = items[4] # number of l-values
# Build covariance matrix for General Resolved Resonance Formats
if lcomp == 1:
items = endf.get_cont_record(file_obj)
# Number of short range type resonance covariances
num_short_range = items[4]
# Number of long range type resonance covariances
num_long_range = items[5]
# Read resonance widths, J values, etc
records = []
for i in range(num_short_range):
items, values = endf.get_list_record(file_obj)
mpar = items[2]
num_res = items[5]
num_par_vals = num_res*6
res_values = values[:num_par_vals]
cov_values = values[num_par_vals:]
energy = res_values[0::6]
spin = res_values[1::6]
gt = res_values[2::6]
gn = res_values[3::6]
gg = res_values[4::6]
gf = res_values[5::6]
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gt[i], gn[i],
gg[i], gf[i]])
# Build the upper-triangular covariance matrix
cov_dim = mpar*num_res
cov = np.zeros([cov_dim, cov_dim])
indices = np.triu_indices(cov_dim)
cov[indices] = cov_values
# Compact format - Resonances and individual uncertainties followed by
# compact correlations
elif lcomp == 2:
items, values = endf.get_list_record(file_obj)
mean = items
num_res = items[5]
energy = values[0::12]
spin = values[1::12]
gt = values[2::12]
gn = values[3::12]
gg = values[4::12]
gf = values[5::12]
par_unc = []
for i in range(num_res):
res_unc = values[i*12+6 : i*12+12]
# Delete 0 values (not provided, no fission width)
# DAJ/DGT always zero, DGF sometimes nonzero [1, 2, 5]
res_unc_nonzero = []
for j in range(6):
if j in [1, 2, 5] and res_unc[j] != 0.0:
res_unc_nonzero.append(res_unc[j])
elif j in [0, 3, 4]:
res_unc_nonzero.append(res_unc[j])
par_unc.extend(res_unc_nonzero)
records = []
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gt[i], gn[i],
gg[i], gf[i]])
corr = endf.get_intg_record(file_obj)
cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc))
# Compatible resolved resonance format
elif lcomp == 0:
cov = np.zeros([4, 4])
records = []
cov_index = 0
for i in range(nls):
items, values = endf.get_list_record(file_obj)
num_res = items[5]
for j in range(num_res):
one_res = values[18*j:18*(j+1)]
res_values = one_res[:6]
cov_values = one_res[6:]
records.append(list(res_values))
# Populate the covariance matrix for this resonance
# There are no covariances between resonances in lcomp=0
cov[cov_index, cov_index] = cov_values[0]
cov[cov_index+1, cov_index+1 : cov_index+2] = cov_values[1:2]
cov[cov_index+1, cov_index+3] = cov_values[4]
cov[cov_index+2, cov_index+2] = cov_values[3]
cov[cov_index+2, cov_index+3] = cov_values[5]
cov[cov_index+3, cov_index+3] = cov_values[6]
cov_index += 4
if j < num_res-1: # Pad matrix for additional values
cov = np.pad(cov, ((0, 4), (0, 4)), 'constant',
constant_values=0)
# Create pandas DataFrame with resonance data, currently
# redundant with data.IncidentNeutron.resonance
columns = ['energy', 'J', 'totalWidth', 'neutronWidth',
'captureWidth', 'fissionWidth']
parameters = pd.DataFrame.from_records(records, columns=columns)
# Determine mpar (number of parameters for each resonance in
# covariance matrix)
nparams, params = parameters.shape
covsize = cov.shape[0]
mpar = int(covsize/nparams)
# Add parameters from File 2
parameters = _add_file2_contributions(parameters,
resonance.parameters)
# Create instance of class
mlbw = cls(energy_min, energy_max, parameters, cov, mpar, lcomp,
resonance)
return mlbw
class SingleLevelBreitWignerCovariance(MultiLevelBreitWignerCovariance):
"""Single-level Breit-Wigner resolved resonance formalism covariance data.
Single-level Breit-Wigner resolved resonance data is identified by LRF=1
in the ENDF-6 format.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
mpar : int
Number of parameters in covariance matrix for each individual resonance
formalism : str
String descriptor of formalism
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
"""
def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res):
super().__init__(energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res)
self.formalism = 'slbw'
class ReichMooreCovariance(ResonanceCovarianceRange):
"""Reich-Moore resolved resonance formalism covariance data.
Reich-Moore resolved resonance data is identified by LRF=3 in the ENDF-6
format.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
mpar : int
Number of parameters in covariance matrix for each individual resonance
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
formalism : str
String descriptor of formalism
"""
def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res):
super().__init__(energy_min, energy_max)
self.parameters = parameters
self.covariance = covariance
self.mpar = mpar
self.lcomp = lcomp
self.file2res = copy.copy(file2res)
self.formalism = 'rm'
@classmethod
def from_endf(cls, ev, file_obj, items, resonance):
"""Create Reich-Moore resonance covariance data from an ENDF
evaluation. Includes the resonance parameters contained separately in
File 32.
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
file_obj : file-like object
ENDF file positioned at the second record of a resonance range
subsection in MF=2, MT=151
items : list
Items from the CONT record at the start of the resonance range
subsection
resonance : openmc.data.Resonance object
openmc.data.Resonance object generated from the same evaluation
used to import values not contained in File 32
Returns
-------
openmc.data.ReichMooreCovariance
Reich-Moore resonance covariance parameters
"""
# Read energy-dependent scattering radius if present
energy_min, energy_max = items[0:2]
nro, naps = items[4:6]
if nro != 0:
params, ape = endf.get_tab1_record(file_obj)
# Other scatter radius parameters
items = endf.get_cont_record(file_obj)
target_spin = items[0]
lcomp = items[3] # Flag for compatibility 0, 1, 2 - 2 is compact form
nls = items[4] # Number of l-values
# Build covariance matrix for General Resolved Resonance Formats
if lcomp == 1:
items = endf.get_cont_record(file_obj)
# Number of short range type resonance covariances
num_short_range = items[4]
# Number of long range type resonance covariances
num_long_range = items[5]
# Read resonance widths, J values, etc
channel_radius = {}
scattering_radius = {}
records = []
for i in range(num_short_range):
items, values = endf.get_list_record(file_obj)
mpar = items[2]
num_res = items[5]
num_par_vals = num_res*6
res_values = values[:num_par_vals]
cov_values = values[num_par_vals:]
energy = res_values[0::6]
spin = res_values[1::6]
gn = res_values[2::6]
gg = res_values[3::6]
gfa = res_values[4::6]
gfb = res_values[5::6]
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gn[i], gg[i],
gfa[i], gfb[i]])
# Build the upper-triangular covariance matrix
cov_dim = mpar*num_res
cov = np.zeros([cov_dim, cov_dim])
indices = np.triu_indices(cov_dim)
cov[indices] = cov_values
# Compact format - Resonances and individual uncertainties followed by
# compact correlations
elif lcomp == 2:
items, values = endf.get_list_record(file_obj)
num_res = items[5]
energy = values[0::12]
spin = values[1::12]
gn = values[2::12]
gg = values[3::12]
gfa = values[4::12]
gfb = values[5::12]
par_unc = []
for i in range(num_res):
res_unc = values[i*12+6 : i*12+12]
# Delete 0 values (not provided in evaluation)
res_unc = [x for x in res_unc if x != 0.0]
par_unc.extend(res_unc)
records = []
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gn[i], gg[i],
gfa[i], gfb[i]])
corr = endf.get_intg_record(file_obj)
cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc))
# Create pandas DataFrame with resonance data
columns = ['energy', 'J', 'neutronWidth', 'captureWidth',
'fissionWidthA', 'fissionWidthB']
parameters = pd.DataFrame.from_records(records, columns=columns)
# Determine mpar (number of parameters for each resonance in
# covariance matrix)
nparams, params = parameters.shape
covsize = cov.shape[0]
mpar = int(covsize/nparams)
# Add parameters from File 2
parameters = _add_file2_contributions(parameters,
resonance.parameters)
# Create instance of ReichMooreCovariance
rmc = cls(energy_min, energy_max, parameters, cov, mpar, lcomp,
resonance)
return rmc
_FORMALISMS = {
0: ResonanceCovarianceRange,
1: SingleLevelBreitWignerCovariance,
2: MultiLevelBreitWignerCovariance,
3: ReichMooreCovariance
# 7: RMatrixLimitedCovariance
}
| mit |
pratapvardhan/pandas | pandas/tests/indexes/interval/test_interval_tree.py | 4 | 3543 | from __future__ import division
import pytest
import numpy as np
from pandas import compat
from pandas._libs.interval import IntervalTree
import pandas.util.testing as tm
@pytest.fixture(
scope='class', params=['int32', 'int64', 'float32', 'float64', 'uint64'])
def dtype(request):
return request.param
@pytest.fixture(scope='class')
def tree(dtype):
left = np.arange(5, dtype=dtype)
return IntervalTree(left, left + 2)
class TestIntervalTree(object):
def test_get_loc(self, tree):
tm.assert_numpy_array_equal(tree.get_loc(1),
np.array([0], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(tree.get_loc(2)),
np.array([0, 1], dtype='int64'))
with pytest.raises(KeyError):
tree.get_loc(-1)
def test_get_indexer(self, tree):
tm.assert_numpy_array_equal(
tree.get_indexer(np.array([1.0, 5.5, 6.5])),
np.array([0, 4, -1], dtype='int64'))
with pytest.raises(KeyError):
tree.get_indexer(np.array([3.0]))
def test_get_indexer_non_unique(self, tree):
indexer, missing = tree.get_indexer_non_unique(
np.array([1.0, 2.0, 6.5]))
tm.assert_numpy_array_equal(indexer[:1],
np.array([0], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(indexer[1:3]),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(indexer[3:]),
np.array([-1], dtype='int64'))
tm.assert_numpy_array_equal(missing, np.array([2], dtype='int64'))
def test_duplicates(self, dtype):
left = np.array([0, 0, 0], dtype=dtype)
tree = IntervalTree(left, left + 1)
tm.assert_numpy_array_equal(np.sort(tree.get_loc(0.5)),
np.array([0, 1, 2], dtype='int64'))
with pytest.raises(KeyError):
tree.get_indexer(np.array([0.5]))
indexer, missing = tree.get_indexer_non_unique(np.array([0.5]))
tm.assert_numpy_array_equal(np.sort(indexer),
np.array([0, 1, 2], dtype='int64'))
tm.assert_numpy_array_equal(missing, np.array([], dtype='int64'))
def test_get_loc_closed(self, closed):
tree = IntervalTree([0], [1], closed=closed)
for p, errors in [(0, tree.open_left),
(1, tree.open_right)]:
if errors:
with pytest.raises(KeyError):
tree.get_loc(p)
else:
tm.assert_numpy_array_equal(tree.get_loc(p),
np.array([0], dtype='int64'))
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="int type mismatch on 32bit")
@pytest.mark.parametrize('leaf_size', [1, 10, 100, 10000])
def test_get_indexer_closed(self, closed, leaf_size):
x = np.arange(1000, dtype='float64')
found = x.astype('intp')
not_found = (-1 * np.ones(1000)).astype('intp')
tree = IntervalTree(x, x + 0.5, closed=closed, leaf_size=leaf_size)
tm.assert_numpy_array_equal(found, tree.get_indexer(x + 0.25))
expected = found if tree.closed_left else not_found
tm.assert_numpy_array_equal(expected, tree.get_indexer(x + 0.0))
expected = found if tree.closed_right else not_found
tm.assert_numpy_array_equal(expected, tree.get_indexer(x + 0.5))
| bsd-3-clause |
BhallaLab/moose-examples | tutorials/ChemicalBistables/propagationBis.py | 2 | 7109 | #########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
"""
This example illustrates propagation of state flips in a
linear 1-dimensional reaction-diffusion system. It uses a
bistable system loaded in from a kkit definition file, and
places this in a tapering cylinder for pseudo 1-dimentionsional
diffusion.
This example illustrates a number of features of reaction-diffusion
calculations.
First, it shows how to set up such systems. Key steps are to create
the compartment and define its voxelization, then create the Ksolve,
Dsolve, and Stoich. Then we assign stoich.compartment, ksolve and
dsolve in that order. Finally we assign the path of the Stoich.
For running the model, we start by introducing
a small symmetry-breaking increment of concInit
of the molecule **b** in the last compartment on the cylinder. The model
starts out with molecules at equal concentrations, so that the system would
settle to the unstable fixed point. This symmetry breaking leads
to the last compartment moving towards the state with an
increased concentration of **b**,
and this effect propagates to all other compartments.
Once the model has settled to the state where **b** is high throughout,
we simply exchange the concentrations of **b** with **c** in the left
half of the cylinder. This introduces a brief transient at the junction,
which soon settles to a smooth crossover.
Finally, as we run the simulation, the tapering geometry comes into play.
Since the left hand side has a larger diameter than the right, the
state on the left gradually wins over and the transition point slowly
moves to the right.
"""
import math
import numpy
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import moose
import sys
def makeModel():
# create container for model
r0 = 1e-6 # m
r1 = 0.5e-6 # m. Note taper.
num = 200
diffLength = 1e-6 # m
comptLength = num * diffLength # m
diffConst = 20e-12 # m^2/sec
concA = 1 # millimolar
diffDt = 0.02 # for the diffusion
chemDt = 0.2 # for the reaction
mfile = '../../genesis/M1719.g'
model = moose.Neutral( 'model' )
compartment = moose.CylMesh( '/model/kinetics' )
# load in model
modelId = moose.loadModel( mfile, '/model', 'ee' )
a = moose.element( '/model/kinetics/a' )
b = moose.element( '/model/kinetics/b' )
c = moose.element( '/model/kinetics/c' )
ac = a.concInit
bc = b.concInit
cc = c.concInit
compartment.r0 = r0
compartment.r1 = r1
compartment.x0 = 0
compartment.x1 = comptLength
compartment.diffLength = diffLength
assert( compartment.numDiffCompts == num )
# Assign parameters
for x in moose.wildcardFind( '/model/kinetics/##[ISA=PoolBase]' ):
#print 'pools: ', x, x.name
x.diffConst = diffConst
# Make solvers
ksolve = moose.Ksolve( '/model/kinetics/ksolve' )
dsolve = moose.Dsolve( '/model/dsolve' )
# Set up clocks.
moose.setClock( 10, diffDt )
for i in range( 11, 17 ):
moose.setClock( i, chemDt )
stoich = moose.Stoich( '/model/kinetics/stoich' )
stoich.compartment = compartment
stoich.ksolve = ksolve
stoich.dsolve = dsolve
stoich.path = "/model/kinetics/##"
b.vec[num-1].concInit *= 1.01 # Break symmetry.
def main():
runtime = 100
displayInterval = 2
makeModel()
dsolve = moose.element( '/model/dsolve' )
moose.reinit()
#moose.start( runtime ) # Run the model for 10 seconds.
a = moose.element( '/model/kinetics/a' )
b = moose.element( '/model/kinetics/b' )
c = moose.element( '/model/kinetics/c' )
img = mpimg.imread( 'propBis.png' )
#imgplot = plt.imshow( img )
#plt.show()
plt.ion()
fig = plt.figure( figsize=(13,12.5) )
png = fig.add_subplot(211)
imgplot = plt.imshow( img )
plt.axis('off')
ax = fig.add_subplot(212)
ax.set_ylim( 0, 0.1 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'Position along cylinder (microns)' )
plt.title( "Initial condition is at b==c, with small stimulus on the right of cylinder. State change propagates rapidly along cylinder" )
pos = numpy.arange( 0, a.vec.conc.size, 1 )
line1, = ax.plot( pos, a.vec.conc, 'r-', label='a' )
line2, = ax.plot( pos, b.vec.conc, 'g-', label='b' )
line3, = ax.plot( pos, c.vec.conc, 'b-', label='c' )
timeLabel = plt.text(60, 0.0009, 'time = 0')
plt.legend()
fig.canvas.draw()
for t in range( displayInterval, runtime, displayInterval ):
moose.start( displayInterval )
line1.set_ydata( a.vec.conc )
line2.set_ydata( b.vec.conc )
line3.set_ydata( c.vec.conc )
timeLabel.set_text( "time = %d" % t )
fig.canvas.draw()
plt.title( 'Swapping concs of b and c in the left half of the cylinder. Boundary slowly moves right due to taper.')
for i in range( b.numData // 2 ):
temp = b.vec[i].conc
b.vec[i].conc = c.vec[i].conc
c.vec[i].conc = temp
newruntime = 200
for t in range( displayInterval, newruntime, displayInterval ):
moose.start( displayInterval )
line1.set_ydata( a.vec.conc )
line2.set_ydata( b.vec.conc )
line3.set_ydata( c.vec.conc )
timeLabel.set_text( "time = %d" % (t + runtime) )
fig.canvas.draw()
print( "Hit 'enter' to exit" )
try:
raw_input( )
except NameError as e: # python3
input( )
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
| gpl-2.0 |
chrisdjscott/Atoman | atoman/gui/mainWindow.py | 1 | 26606 | # -*- coding: utf-8 -*-
"""
The main window class
@author: Chris Scott
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import sys
import shutil
import platform
import tempfile
import traceback
import logging
import datetime
import PySide2
from PySide2 import QtGui, QtCore, QtWidgets
import vtk
import numpy as np
import matplotlib
import scipy
from ..visutils.utilities import iconPath, resourcePath, dataPath
from ..system import atoms
from ..system.atoms import elements
from . import toolbar as toolbarModule
from . import preferences
from . import rendererSubWindow
from . import systemsDialog
from . import viewPorts
from .dialogs import simpleDialogs
from .dialogs import bondEditor
from .dialogs import elementEditor
from .dialogs import consoleWindow
from .. import _version
################################################################################
class MainWindow(QtWidgets.QMainWindow):
"""
The main window.
"""
configDir = os.path.join(os.environ["HOME"], ".atoman")
def __init__(self, desktop, parent=None, testing=False):
super(MainWindow, self).__init__(parent)
# logger
self.logger = logging.getLogger(__name__)
# first time show called
self.testingFlag = testing
self.firstShow = True
# QDesktopWidget: gives access to screen geometry, which screen we're displayed on, etc...
self.desktop = desktop
# initialise user interface
self.initUI()
# start threadpool
self.threadPool = QtCore.QThreadPool(self)
if self.threadPool.maxThreadCount() < 2:
self.threadPool.setMaxThreadCount(2)
# set focus
self.setFocus()
def initUI(self):
"""Initialise the interface."""
logger = self.logger
logger.debug("Initialising user interface")
# defaults (TODO: remove this)
self.refLoaded = False
# MD code resource (not currently used!?)
logger.debug("MD resource path: %s (exists %s)", resourcePath("lbomd.IN", dirname="md_input"),
os.path.exists(resourcePath("lbomd.IN", dirname="md_input")))
# get settings object
settings = QtCore.QSettings()
# initial directory
currentDir = str(settings.value("mainWindow/currentDirectory", ""))
logger.debug("Settings dir: '%s'", currentDir)
if hasattr(sys, "_MEIPASS"):
if not len(currentDir) or not os.path.exists(currentDir):
# change to home directory if running from pyinstaller bundle
currentDir = os.environ.get("HOME")
logger.debug("Change dir $HOME: '%s'", currentDir)
else:
currentDir = os.getcwd()
logger.debug("Use CWD: '%s'", currentDir)
os.chdir(currentDir)
# toolbar size (fixed)
self.mainToolbarWidth = 350
self.mainToolbarHeight = 460
# default window widget size
self.renderWindowWidth = 760 * 1.2
self.renderWindowHeight = 715 * 1.2
# default size
windowWidth = self.renderWindowWidth + self.mainToolbarWidth
windowHeight = self.renderWindowHeight
self.defaultWindowWidth = windowWidth
self.defaultWindowHeight = windowHeight
# resize
self.resize(settings.value("mainWindow/size", QtCore.QSize(windowWidth, windowHeight)))
# location
self.centre()
self.setWindowTitle("Atoman")
# create temporary directory for working in (needs to force tmp on mac so POV-Ray can run in it)
self.tmpDirectory = tempfile.mkdtemp(prefix="atoman-", dir="/tmp")
# console window for logging output to
self.console = consoleWindow.ConsoleWindow(self)
# image viewer
self.imageViewer = simpleDialogs.ImageViewer(self, parent=self)
# preferences dialog
self.preferences = preferences.PreferencesDialog(self, parent=self)
# bonds editor
self.bondsEditor = bondEditor.BondEditorDialog(parent=self)
# add file actions
exitAction = self.createAction("Exit", self.close, "Ctrl-Q", "oxygen/application-exit.png", "Exit application")
openFileAction = self.createAction("Open file", slot=self.showOpenFileDialog, icon="oxygen/document-open.png",
tip="Open file")
openRemoteFileAction = self.createAction("Open remote file", slot=self.showOpenRemoteFileDialog,
icon="oxygen/document-open-remote.png", tip="Open remote file")
openCWDAction = self.createAction("Open CWD", slot=self.openCWD, icon="oxygen/folder-open.png",
tip="Open current working directory")
exportElementsAction = self.createAction("Export elements", slot=self.exportElements,
icon="oxygen/document-export", tip="Export element properties")
importElementsAction = self.createAction("Import elements", slot=self.importElements,
icon="oxygen/document-import.png", tip="Import element properties")
resetElementsAction = self.createAction("Reset elements", slot=self.resetElements, icon="oxygen/edit-undo.png",
tip="Reset elements settings to default values")
exportBondsAction = self.createAction("Export bonds", slot=self.exportBonds,
icon="oxygen/document-export.png", tip="Export bonds file")
importBondsAction = self.createAction("Import bonds", slot=self.importBonds,
icon="oxygen/document-import.png", tip="Import bonds file")
resetBondsAction = self.createAction("Reset bonds", slot=self.resetBonds, icon="oxygen/edit-undo.png",
tip="Reset bonds settings to default values")
showImageViewerAction = self.createAction("Image viewer", slot=self.showImageViewer,
icon="oxygen/applications-graphics.png", tip="Show image viewer")
showPreferencesAction = self.createAction("Preferences", slot=self.showPreferences,
icon="oxygen/configure.png", tip="Show preferences window")
changeCWDAction = self.createAction("Change CWD", slot=self.changeCWD, icon="oxygen/folder-new.png",
tip="Change current working directory")
# add file menu
fileMenu = self.menuBar().addMenu("&File")
self.addActions(fileMenu, (openFileAction, openRemoteFileAction,
openCWDAction, changeCWDAction, None, exitAction))
# settings menu
settingsMenu = self.menuBar().addMenu("&Settings")
self.addActions(settingsMenu, (importElementsAction, exportElementsAction, resetElementsAction,
importBondsAction, exportBondsAction, resetBondsAction))
# button to show console window
openConsoleAction = self.createAction("Console", self.showConsole, None, "oxygen/utilities-log-viewer.png",
"Show console window")
# element editor action
openElementEditorAction = self.createAction("Element editor", slot=self.openElementEditor,
icon="other/periodic-table-icon.png", tip="Show element editor")
# open bonds editor action
openBondsEditorAction = self.createAction("Bonds editor", slot=self.openBondsEditor, icon="other/molecule1.png",
tip="Show bonds editor")
# default window size action
defaultWindowSizeAction = self.createAction("Default size", slot=self.defaultWindowSize,
icon="oxygen/view-restore.png", tip="Resize window to default size")
# add view menu
viewMenu = self.menuBar().addMenu("&View")
self.addActions(viewMenu, (openConsoleAction, showImageViewerAction, openElementEditorAction,
openBondsEditorAction, showPreferencesAction))
# add window menu
windowMenu = self.menuBar().addMenu("&Window")
self.addActions(windowMenu, (defaultWindowSizeAction,))
# add file toolbar
fileToolbar = self.addToolBar("File")
fileToolbar.addAction(exitAction)
fileToolbar.addSeparator()
fileToolbar.addAction(openFileAction)
fileToolbar.addAction(openRemoteFileAction)
fileToolbar.addAction(openCWDAction)
fileToolbar.addAction(changeCWDAction)
fileToolbar.addSeparator()
# util tool bar
viewToolbar = self.addToolBar("Utilities")
viewToolbar.addAction(openConsoleAction)
viewToolbar.addAction(showImageViewerAction)
viewToolbar.addAction(openElementEditorAction)
viewToolbar.addAction(openBondsEditorAction)
viewToolbar.addAction(showPreferencesAction)
viewToolbar.addSeparator()
# vis tool bar
visToolbar = self.addToolBar("Visualisation")
numViewPortsCombo = QtWidgets.QComboBox()
numViewPortsCombo.addItem("1")
numViewPortsCombo.addItem("2")
numViewPortsCombo.addItem("4")
numViewPortsCombo.currentIndexChanged[str].connect(self.numViewPortsChanged)
visToolbar.addWidget(QtWidgets.QLabel("View ports:"))
visToolbar.addWidget(numViewPortsCombo)
visToolbar.addSeparator()
# add about action
aboutAction = self.createAction("About Atoman", slot=self.aboutMe, icon="oxygen/help-about.png",
tip="About Atoman")
helpAction = self.createAction("Atoman Help", slot=self.showHelp, icon="oxygen/help-browser.png",
tip="Show help window (opens in external browser)")
# add help toolbar
helpToolbar = self.addToolBar("Help")
helpToolbar.addAction(aboutAction)
helpToolbar.addAction(helpAction)
helpMenu = self.menuBar().addMenu("&Help")
self.addActions(helpMenu, (aboutAction, helpAction))
# add cwd to status bar
self.currentDirectoryLabel = QtWidgets.QLabel("")
self.updateCWD()
sb = QtWidgets.QStatusBar()
self.setStatusBar(sb)
self.progressBar = QtWidgets.QProgressBar(self.statusBar())
self.statusBar().addPermanentWidget(self.progressBar)
self.statusBar().addPermanentWidget(self.currentDirectoryLabel)
self.hideProgressBar()
# dict of currently loaded systems
self.loaded_systems = {}
# systems dialog
self.systemsDialog = systemsDialog.SystemsDialog(self, self)
# element editor
self.elementEditor = elementEditor.ElementEditor(parent=self)
# view ports / renderer windows
self.rendererWindows = [] # TODO: remove
self.viewPorts = viewPorts.ViewPortsWidget(parent=self)
self.numViewPortsChanged(int(numViewPortsCombo.currentText()))
self.setCentralWidget(self.viewPorts)
# add the main tool bar
self.mainToolbar = toolbarModule.MainToolbar(self, self.mainToolbarWidth, self.mainToolbarHeight)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.mainToolbar)
self.setStatus('Ready')
def showOpenRemoteFileDialog(self):
"""
Open remote file
"""
self.systemsDialog.load_system_form.readerForm.openSFTPBrowser()
def showOpenFileDialog(self):
"""
Open file
"""
self.systemsDialog.load_system_form.readerForm.openFileDialog()
def defaultWindowSize(self):
"""
Resize window to default size
"""
self.resize(self.defaultWindowWidth, self.defaultWindowHeight)
def changeCWD(self):
"""
Change current working directory...
"""
new_dir = QtWidgets.QFileDialog.getExistingDirectory(self, "New working directory", os.getcwd())
logging.debug("Changing directory: '%s'", new_dir)
if new_dir and os.path.isdir(new_dir):
os.chdir(new_dir)
self.updateCWD()
def rendererWindowActivated(self, sw):
"""
Sub window activated. (TEMPORARY)
"""
pass
def numViewPortsChanged(self, num_str):
"""Update the number of view ports."""
self.viewPorts.numViewPortsChanged(int(num_str))
self.rendererWindows = self.viewPorts.getViewPorts()
for rw in self.rendererWindows:
rw.outputDialog.imageTab.imageSequenceTab.refreshLinkedRenderers()
def showPreferences(self):
"""
Show preferences window.
"""
self.preferences.hide()
self.preferences.show()
def showImageViewer(self):
"""
Show the image viewer.
"""
self.imageViewer.hide()
self.imageViewer.show()
def openBondsEditor(self):
"""
Open bonds editor
"""
self.bondsEditor.show()
def openElementEditor(self):
"""
Open element editor.
"""
self.elementEditor.show()
def importElements(self):
"""
Import element properties file.
"""
msg = "This will overwrite the current element properties file. You should create a backup first!\n\n"
msg += "Do you wish to continue?"
reply = QtWidgets.QMessageBox.question(self, "Message", msg,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
# open file dialog
title = "Atoman - Import element properties"
fname = QtWidgets.QFileDialog.getOpenFileName(self, title, ".", "IN files (*.IN)")[0]
if fname:
self.logger.info("Importing elements settings from '%s'", fname)
# read in new file
elements.read(fname)
# overwrite current file
elements.write(dataPath("atoms.IN"))
# set on Lattice objects too
self.inputState.refreshElementProperties()
self.refState.refreshElementProperties()
def exportElements(self):
"""
Export element properties to file.
"""
fname = os.path.join(".", "atoms-exported.IN")
fname = QtWidgets.QFileDialog.getSaveFileName(self, "Atoman - Export element properties", fname,
"IN files (*.IN)",
options=QtWidgets.QFileDialog.DontUseNativeDialog)[0]
if fname:
if "." not in fname or fname[-3:] != ".IN":
fname += ".IN"
self.logger.info("Exporting elements settings to '%s'", fname)
elements.write(fname)
def resetElements(self):
"""Reset elements settings."""
msg = "This will overwrite the current element properties file. You should create a backup first!\n\n"
msg += "Do you wish to continue?"
reply = QtWidgets.QMessageBox.question(self, "Message", msg,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
atoms.resetAtoms()
def resetBonds(self):
"""Reset bonds settings."""
msg = "This will overwrite the current bonds file. You should create a backup first!\n\n"
msg += "Do you wish to continue?"
reply = QtWidgets.QMessageBox.question(self, "Message", msg,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
atoms.resetBonds()
def importBonds(self):
"""
Import bonds file.
"""
msg = "This will overwrite the current bonds file. You should create a backup first!\n\n"
msg += "Do you wish to continue?"
reply = QtWidgets.QMessageBox.question(self, "Message", msg,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
# open file dialog
fname = QtWidgets.QFileDialog.getOpenFileName(self, "Atoman - Import bonds file", ".", "IN files (*.IN)",
options=QtWidgets.QFileDialog.DontUseNativeDialog)[0]
if fname:
self.logger.info("Import bonds settings from '%s'", fname)
# read in new file
elements.readBonds(fname)
# overwrite current file
elements.writeBonds(dataPath("bonds.IN"))
self.setStatus("Imported bonds file")
def exportBonds(self):
"""
Export bonds file.
"""
fname = os.path.join(".", "bonds-exported.IN")
fname = QtWidgets.QFileDialog.getSaveFileName(self, "Atoman - Export bonds file", fname, "IN files (*.IN)",
options=QtWidgets.QFileDialog.DontUseNativeDialog)[0]
if fname:
if "." not in fname or fname[-3:] != ".IN":
fname += ".IN"
self.logger.info("Exporting bonds settings to '%s'", fname)
elements.writeBonds(fname)
self.setStatus("Bonds file exported")
def openCWD(self):
"""
Open current working directory.
"""
dirname = os.getcwd()
osname = platform.system()
if osname == "Linux":
os.system("xdg-open '%s'" % dirname)
elif osname == "Darwin":
os.system("open '%s'" % dirname)
elif osname == "Windows":
os.startfile(dirname)
def centre(self):
"""
Centre the window.
"""
qr = self.frameGeometry()
cp = QtWidgets.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def showConsole(self):
"""
Open the console window
"""
self.console.show()
def showHelp(self, relativeUrl=None):
"""
Show the help window.
"""
baseUrl = 'https://chrisdjscott.github.io/Atoman/'
if relativeUrl is not None and relativeUrl:
url = QtCore.QUrl(os.path.join(baseUrl, relativeUrl))
else:
url = QtCore.QUrl(baseUrl)
self.logger.debug("Opening help url: {0}".format(url.toString()))
QtGui.QDesktopServices.openUrl(url)
def renderWindowClosed(self):
"""
A render window has been closed.
"""
i = 0
while i < len(self.rendererWindows):
rw = self.rendererWindows[i]
if rw.closed:
self.rendererWindows.pop(i)
self.rendererWindowsSubWin.pop(i)
else:
i += 1
for rw in self.rendererWindows:
rw.outputDialog.imageTab.imageSequenceTab.refreshLinkedRenderers()
def confirmCloseEvent(self):
"""
Show a dialog to confirm closeEvent.
"""
dlg = simpleDialogs.ConfirmCloseDialog(self)
close = False
clearSettings = False
reply = dlg.exec_()
if reply:
close = True
if dlg.clearSettingsCheck.isChecked():
clearSettings = True
return close, clearSettings
def closeEvent(self, event):
"""
Catch attempt to close
"""
if self.testingFlag:
event.accept()
else:
close, clearSettings = self.confirmCloseEvent()
if close:
self.tidyUp()
if clearSettings:
self.clearSettings()
else:
self.saveSettings()
event.accept()
else:
event.ignore()
def clearSettings(self):
"""
Clear settings.
"""
# settings object
settings = QtCore.QSettings()
settings.clear()
def saveSettings(self):
"""
Save settings before exit.
"""
# settings object
settings = QtCore.QSettings()
# store current working directory
settings.setValue("mainWindow/currentDirectory", os.getcwd())
# window size
settings.setValue("mainWindow/size", self.size())
def tidyUp(self):
"""
Tidy up before close application
"""
shutil.rmtree(self.tmpDirectory)
self.console.accept()
self.threadPool.waitForDone()
def hideProgressBar(self):
"""
Hide the progress bar
"""
self.progressBar.hide()
self.progressBar.reset()
self.setStatus("Finished")
def updateProgress(self, n, nmax, message):
"""
Update progress bar
"""
self.progressBar.show()
self.progressBar.setRange(0, nmax)
self.progressBar.setValue(n)
self.setStatus(message)
QtWidgets.QApplication.processEvents()
def setStatus(self, message):
"""
Set temporary status in status bar
"""
self.statusBar().showMessage(self.tr(message))
def updateCWD(self):
"""
Updates the CWD label in the status bar.
"""
dirname = os.getcwd()
self.currentDirectoryLabel.setText("CWD: '%s'" % dirname)
self.imageViewer.changeDir(dirname)
def readLBOMDIN(self):
"""
Try to read sim identity and PBCs from lbomd.IN
"""
logger = logging.getLogger(__name__)
if os.path.exists("lbomd.IN"):
f = open("lbomd.IN")
try:
f.readline()
f.readline()
f.readline()
f.readline()
line = f.readline().strip()
array = line.split()
try:
PBC = [0] * 3
PBC[0] = int(array[0])
PBC[1] = int(array[1])
PBC[2] = int(array[2])
except IndexError:
logger.warning("Index error 2 (check lbomd.IN format)")
except Exception:
err = "Read lbomd.IN failed with error:\n\n%s" % "".join(traceback.format_exception(*sys.exc_info()))
self.displayError(err)
finally:
f.close()
def displayWarning(self, message):
"""
Display warning message.
"""
msgBox = QtWidgets.QMessageBox(self)
msgBox.setText(message)
msgBox.setWindowFlags(msgBox.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
msgBox.setStandardButtons(QtWidgets.QMessageBox.Ok)
msgBox.setIcon(QtWidgets.QMessageBox.Warning)
msgBox.exec_()
def displayError(self, message):
"""
Display error message
"""
msgBox = QtWidgets.QMessageBox(self)
msgBox.setText(message)
msgBox.setWindowFlags(msgBox.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
msgBox.setStandardButtons(QtWidgets.QMessageBox.Ok)
msgBox.setIcon(QtWidgets.QMessageBox.Critical)
msgBox.exec_()
def aboutMe(self):
"""
Display about message.
"""
msgBox = QtWidgets.QMessageBox(self)
# get the version right
version = _version.get_versions()['version']
# construct paragraph with software versions
softline = "Python %s - Qt %s - PySide2 %s - VTK %s" % (platform.python_version(), QtCore.__version__,
PySide2.__version__, vtk.vtkVersion.GetVTKVersion())
softline += " - NumPy %s - SciPy %s - Matplotlib %s" % (np.__version__, scipy.__version__,
matplotlib.__version__)
# add paramiko if available
try:
import paramiko
except ImportError:
pass
else:
softline += " - paramiko %s" % paramiko.__version__
softline += " on %s" % platform.system()
msgBox.setText("""<p><b>Atoman</b> %s</p>
<p>Copyright © %d Chris Scott</p>
<p>This application can be used to visualise atomistic simulations.</p>
<p>GUI based on <a href="http://sourceforge.net/projects/avas/">AVAS</a>
by Marc Robinson.</p>
<p>%s</p>""" % (version, datetime.date.today().year, softline))
msgBox.setWindowFlags(msgBox.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
msgBox.setStandardButtons(QtWidgets.QMessageBox.Ok)
msgBox.setIcon(QtWidgets.QMessageBox.Information)
msgBox.exec_()
# dlg = dialogs.AboutMeDialog(parent=self)
# dlg.exec_()
def createAction(self, text, slot=None, shortcut=None, icon=None, tip=None, checkable=False):
"""
Create an action
"""
action = QtWidgets.QAction(text, self)
if icon is not None:
action.setIcon(QtGui.QIcon(iconPath(icon)))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip("<p>{0}</p>".format(tip))
action.setStatusTip(tip)
if callable(slot):
action.triggered.connect(slot)
if checkable:
action.setCheckable(True)
return action
def addActions(self, target, actions):
"""
Add a tuple of actions to the target.
"""
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
| mit |
hkhpub/kagglepy | titanic/refs/forestref.py | 1 | 4202 | """ Writing my first randomforest code.
Author : AstroDave
Date : 23rd September 2012
Revised: 15 April 2014
please see packages.python.org/milk/randomforests.html for more
"""
import pandas as pd
import numpy as np
import csv as csv
from sklearn.ensemble import RandomForestClassifier
# Data cleanup
# TRAIN DATA
train_df = pd.read_csv('/home/hkh/sources/kagglepy/titanic/data/train.csv', header=0) # Load the train file into a dataframe
# I need to convert all strings to integer classifiers.
# I need to fill in the missing values of the data and make it complete.
# female = 0, Male = 1
train_df['Gender'] = train_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked from 'C', 'Q', 'S'
# Note this is not ideal: in translating categories to numbers, Port "2" is not 2 times greater than Port "1", etc.
# All missing Embarked -> just make them embark from most common place
if len(train_df.Embarked[ train_df.Embarked.isnull() ]) > 0:
    train_df.loc[ train_df.Embarked.isnull(), 'Embarked' ] = train_df.Embarked.dropna().mode().values
Ports = list(enumerate(np.unique(train_df['Embarked']))) # determine all values of Embarked,
Ports_dict = { name : i for i, name in Ports } # set up a dictionary in the form Ports : index
train_df.Embarked = train_df.Embarked.map( lambda x: Ports_dict[x]).astype(int) # Convert all Embark strings to int
# All the ages with no data -> make the median of all Ages
median_age = train_df['Age'].dropna().median()
if len(train_df.Age[ train_df.Age.isnull() ]) > 0:
train_df.loc[ (train_df.Age.isnull()), 'Age'] = median_age
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
train_df = train_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# TEST DATA
test_df = pd.read_csv('/home/hkh/sources/kagglepy/titanic/data/test.csv', header=0) # Load the test file into a dataframe
# I need to do the same with the test data now, so that the columns are the same as the training data
# I need to convert all strings to integer classifiers:
# female = 0, Male = 1
test_df['Gender'] = test_df['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Embarked from 'C', 'Q', 'S'
# All missing Embarked -> just make them embark from most common place
if len(test_df.Embarked[ test_df.Embarked.isnull() ]) > 0:
    test_df.loc[ test_df.Embarked.isnull(), 'Embarked' ] = test_df.Embarked.dropna().mode().values
# Again convert all Embarked strings to int
test_df.Embarked = test_df.Embarked.map( lambda x: Ports_dict[x]).astype(int)
# All the ages with no data -> make the median of all Ages
median_age = test_df['Age'].dropna().median()
if len(test_df.Age[ test_df.Age.isnull() ]) > 0:
test_df.loc[ (test_df.Age.isnull()), 'Age'] = median_age
# All the missing Fares -> assume median of their respective class
if len(test_df.Fare[ test_df.Fare.isnull() ]) > 0:
median_fare = np.zeros(3)
for f in range(0,3): # loop 0 to 2
median_fare[f] = test_df[ test_df.Pclass == f+1 ]['Fare'].dropna().median()
for f in range(0,3): # loop 0 to 2
test_df.loc[ (test_df.Fare.isnull()) & (test_df.Pclass == f+1 ), 'Fare'] = median_fare[f]
# Collect the test data's PassengerIds before dropping it
ids = test_df['PassengerId'].values
# Remove the Name column, Cabin, Ticket, and Sex (since I copied and filled it to Gender)
test_df = test_df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'PassengerId'], axis=1)
# The data is now ready to go. So lets fit to the train, then predict to the test!
# Convert back to a numpy array
train_data = train_df.values
test_data = test_df.values
print 'Training...'
forest = RandomForestClassifier(n_estimators=100)
forest = forest.fit( train_data[0::,1::], train_data[0::,0] )
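# Optional sanity check (not part of the original script): estimate the model's
# accuracy with 5-fold cross-validation before predicting on the test set.
# This is only a sketch; it assumes the era-appropriate sklearn.cross_validation
# module is available alongside the imports above.
from sklearn import cross_validation
cv_scores = cross_validation.cross_val_score(forest, train_data[0::, 1::], train_data[0::, 0], cv=5)
print 'Cross-validation accuracy: %.3f (+/- %.3f)' % (cv_scores.mean(), cv_scores.std())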
print 'Predicting...'
output = forest.predict(test_data).astype(int)
predictions_file = open("/home/hkh/sources/kagglepy/titanic/output/myfirstforest.csv", "wb")
open_file_object = csv.writer(predictions_file)
open_file_object.writerow(["PassengerId","Survived"])
open_file_object.writerows(zip(ids, output))
predictions_file.close()
print 'Done.'
| mit |
FRESNA/vresutils | setup.py | 1 | 1270 | from __future__ import absolute_import
from setuptools import setup, find_packages
from io import open
with open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name='vresutils',
author='Jonas Hoersch (KIT,FIAS), David Schlachtberger (FIAS), Sarah Becker (FIAS)',
author_email='[email protected]',
description='Varying Renewable Energy System Utilities',
long_description=long_description,
url='https://github.com/FRESNA/vresutils',
license='GPLv3',
packages=find_packages(exclude=['doc', 'test']),
use_scm_version={'write_to': 'vresutils/version.py'},
setup_requires=['setuptools_scm'],
install_requires=['countrycode', 'fiona', 'matplotlib',
'memory_profiler',
'networkx>=2', 'numpy', 'pandas>=0.19.0',
'pyomo', 'scipy', 'pyproj', 'pyshp', 'rasterio>=1.0',
'shapely', 'six'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: English',
'Operating System :: OS Independent',
])
| gpl-3.0 |
TESScience/SPyFFI | Catalogs.py | 1 | 16338 | """Keep track of Catalogs of objects, usually stars."""
import os.path
import logging
import matplotlib.animation
from astroquery.vizier import Vizier
import zachopy.star
import astropy.coordinates
import astropy.units
import astropy.table
import zachopy.utils
import numpy as np
import matplotlib.pylab as plt
import settings
import relations
import Lightcurve
from settings import log_file_handler
logger = logging.getLogger(__name__)
logger.addHandler(log_file_handler)
def makeCatalog(**kwargs):
"""use keywords to select a kind of Catalog,
enter its parameters and construct it,
and return the catalog object"""
# pull out the name of the catalog
name = kwargs['name']
# make either a test pattern, or a real star catalog, or something else?
if name.lower() == 'testpattern':
# make a gridded test pattern of stars, with keywords passed
cat = TestPattern(**kwargs)
elif name.lower() == 'ucac4':
# make a catalog from UCAC4, with keywords passed
cat = UCAC4(**kwargs)
else:
# interpret the name as a single star, and draw a catalog around it
star = zachopy.star.Star(name)
kwargs['ra'], kwargs['dec'] = star.icrs.ra.deg, star.icrs.dec.deg
cat = UCAC4(**kwargs)
return cat
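# Illustrative sketch (not part of the original module) of how makeCatalog can
# be called; the sizes, coordinates and radius below are assumptions chosen
# only for demonstration.
def _exampleMakeCatalog():
    """Build a gridded test-pattern catalog and a UCAC4 catalog around a
    position, both without variability (hypothetical helper)."""
    # a coarse test pattern, roughly 1000" across with 250" spacing
    test = makeCatalog(name='testpattern', size=1000.0, spacing=250.0,
                       starsarevariable=False)
    # real UCAC4 stars within 0.1 deg of an arbitrary (ra, dec)
    real = makeCatalog(name='ucac4', ra=84.9, dec=-34.1, radius=0.1,
                       starsarevariable=False)
    return test.arrays(), real.arrays()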
class Star(object):
"""a Star object, containing at least RA + Dec + magnitude"""
def __init__(self, ra=0.0, dec=0.0, tmag=10.0, **kwargs):
"""initialize a star, with a coordinate, magnitude, (and more?)"""
# the coordinate object stores the ICRS
self.coord = astropy.coordinates.ICRS(ra=ra, dec=dec,
unit=(astropy.units.deg, astropy.units.deg))
self.ra = ra
self.dec = dec
self.tmag = tmag
for k in kwargs.keys():
self.__dict__[k] = kwargs[k]
class Catalog(object):
"""an object to keep track of lots of stars"""
def __init__(self):
# decide whether or not this Catalog is chatty
self.directory = 'catalogs/'
zachopy.utils.mkdir(os.path.join(settings.intermediates, self.directory))
def addLCs(self, fainteststarwithlc=None, fractionofstarswithlc=1.0, seed=None, **kw):
"""
addLCs() populates a catalog with light curves.
addLCs() makes use of these keywords:
fainteststarwithlc=None (what's the faintest magnitude star that should be populated with a lightcurve?)
fractionofstarswithlc=1.0 (what fraction of eligible stars should be populated with lightcurves, from 0 to 1)
seed=None (if you want the exact light curves on multiple calls, set a value for the seed)
addLCs() passes additional keywords to SPyFFI.Lightcurve.random(**kw):
random() makes use of these keyword arguments:
options=['trapezoid', 'sin'] (a list of the kinds of a variability to choose from)
fractionwithextremelc=False (should we allow fractionwithextremelc variability [good for movies] or no?)
"""
np.random.seed(seed)
# total number of stars we need to deal with
ntotal = len(self.tmag)
# make sure everything is at least populated as a constant
constant = Lightcurve.constant()
self.lightcurves = np.array([constant] * ntotal)
# make sure that the maximum magnitude for variable stars is defined
if fainteststarwithlc is None:
fainteststarwithlc = np.max(self.tmag) + 1
# pull only the stars that pass the brightness cut
brightenough = (self.tmag <= fainteststarwithlc).nonzero()[0]
nbrightenough = len(brightenough)
logger.info(
'{0} stars are brighter than {1}; '
'populating {2:.1f}% of them with light curves'.format(
nbrightenough,
fainteststarwithlc,
fractionofstarswithlc * 100))
        # use the input seed, so that the same stars receive light curves on repeated calls
        nlc = int(round(len(brightenough) * fractionofstarswithlc))
        for i in np.random.choice(brightenough, nlc, replace=False):
self.lightcurves[i] = Lightcurve.random(**kw)
@property
def lightcurvecodes(self):
"""return an array of the light curve codes"""
return [lc.code for lc in self.lightcurves]
def arrays(self):
"""return (static) arrays of positions, magnitudes, and effective temperatures"""
return self.ra, self.dec, self.tmag, self.temperature
def snapshot(self, bjd=None, epoch=None, exptime=0.5 / 24.0):
"""return a snapshot of positions, magnitudes, and effective temperatures
(all of which may be time-varying)"""
# propagate proper motions
if bjd is not None:
epoch = (bjd - 2451544.5) / 365.25 + 2000.0
else:
bjd = (epoch - 2000.0) * 365.25 + 2451544.5
ra, dec = self.atEpoch(epoch)
# determine brightness of star
moment = np.array([lc.integrated(bjd, exptime) for lc in self.lightcurves]).flatten()
tmag = self.tmag + moment
# determine color of star
temperature = self.temperature
assert (ra.shape == tmag.shape)
return ra, dec, tmag, temperature
def atEpoch(self, epoch):
# how many years since the catalog's epoch?
timeelapsed = epoch - self.epoch # in years
logger.info('projecting catalog {0:.3f} years relative to {1:.0f}'.format(timeelapsed, self.epoch))
# calculate the dec
decrate = self.pmdec / 60.0 / 60.0 / 1000.0 # in degrees/year (assuming original was in mas/year)
decindegrees = self.dec + timeelapsed * decrate
# calculate the unprojected rate of RA motion, using the mean declination between the catalog and present epoch
rarate = self.pmra / 60.0 / 60.0 / np.cos((
self.dec + timeelapsed * decrate / 2.0) * np.pi / 180.0) / 1000.0 # in degress of RA/year (assuming original was *projected* mas/year)
raindegrees = self.ra + timeelapsed * rarate
# return the current positions
return raindegrees, decindegrees
def plot(self, epoch=2018.0):
plt.ion()
plt.figure('star chart')
try:
self.ax.cla()
        except AttributeError:
self.ax = plt.subplot()
ra, dec, tmag, temperature = self.snapshot(epoch=epoch)
deltamag = 20.0 - tmag
size = deltamag ** 2 * 5
try:
self.plotdata.set_data(ra, dec)
        except AttributeError:
self.plotdata = self.ax.scatter(ra, dec, s=size, marker='o', color='grey', alpha=0.3, edgecolors='black')
# for i in range(len(ra)):
# self.ax.text(ra[i], dec[i], '{0:.2f}'.format(tmag[i]),horizontalalignment='center', verticalalignment='center', alpha=0.5, size=8, color='green',weight='bold')
self.ax.set_aspect(1)
self.ax.set_xlabel('Right Ascension')
self.ax.set_ylabel('Declination')
self.ax.set_title('{0} at epoch {1}'.format(self.__class__.__name__, epoch))
self.ax.set_xlim(np.min(self.ra), np.max(self.ra))
self.ax.set_ylim(np.min(self.dec), np.max(self.dec))
plt.draw()
def movie(self, epochs=[1950, 2050], bitrate=10000):
metadata = dict(artist='Zach Berta-Thompson ([email protected])')
self.writer = matplotlib.animation.FFMpegWriter(fps=30, metadata=metadata, bitrate=bitrate)
self.plot(np.min(epochs))
f = plt.gcf()
filename = settings.dirs['plots'] + 'testcatalogpropermotions.mp4'
with self.writer.saving(f, filename, 100):
for e in np.linspace(epochs[0], epochs[1], 20):
logger.info('{0}'.format(e))
self.plot(e)
self.writer.grab_frame()
logger.info('saved movie to {0}'.format(filename))
def writeProjected(self, ccd=None, outfile='catalog.txt'):
# take a snapshot projection of the catalog
ccd.camera.cartographer.ccd = ccd
ras, decs, tmag, temperatures = self.snapshot(ccd.camera.bjd,
exptime=ccd.camera.cadence / 60.0 / 60.0 / 24.0)
# calculate the CCD coordinates of these stars
stars = ccd.camera.cartographer.point(ras, decs, 'celestial')
x, y = stars.ccdxy.tuple
basemag = self.tmag
lc = self.lightcurvecodes
# does the temperature matter at all? (does the PSF have multiple temperatures available?)
if len(ccd.camera.psf.binned_axes['stellartemp']) > 1:
data = [ras, decs, self.pmra, self.pmdec, x, y, basemag, temperatures, lc]
            names = ['ra', 'dec', 'pmracosdec_mas', 'pmdec_mas', 'x', 'y', 'tmag', 'stellartemperature', 'lc']
else:
data = [ras, decs, self.pmra, self.pmdec, x, y, basemag, lc]
names = ['ra', 'dec', 'pmracosdec_mas', 'pmdec_mas', 'x', 'y', 'tmag', 'lc']
t = astropy.table.Table(data=data, names=names)
t.write(outfile, format='ascii.fixed_width', delimiter=' ')
logger.info("save projected star catalog {0}".format(outfile))
class TestPattern(Catalog):
"""a test pattern catalog, creating a grid of stars to fill an image"""
def __init__(self, lckw=None, starsarevariable=True, **kwargs):
"""create a size x size square (in arcsecs) test pattern of stars,
with spacing (in arcsecs) between each element and
magnitudes spanning the range of magnitudes"""
Catalog.__init__(self)
self.load(**kwargs)
if starsarevariable:
self.addLCs(**lckw)
else:
self.addLCs(fractionofstarswithlc=0.0)
def load(self,
size=3000.0, # the overall size of the grid
spacing=200.0, # how far apart are stars from each other (")
magnitudes=[6, 16], # list of min, max magnitudes
ra=0.0, dec=0.0, # made-up center of pattern
randomizenudgesby=21.1, # how far to nudge stars (")
randomizepropermotionsby=0.0, # random prop. mot. (mas/yr)
randomizemagnitudes=False, # randomize the magnitudes?
**kwargs):
# set the name of the catalog
self.name = 'testpattern_{0:.0f}to{1:.0f}'.format(np.min(magnitudes), np.max(magnitudes))
# how many stars do we need?
pixels = np.maximum(np.int(size / spacing), 1)
n = pixels ** 2
# construct a linear grid of magnitudes
self.tmag = np.linspace(np.min(magnitudes), np.max(magnitudes), n)[::-1]
# create a rigid grid of RA and Dec, centered at 0
ras, decs = np.meshgrid(np.arange(pixels) * spacing, np.arange(pixels) * spacing)
        # offset these (note: do not make small-angle approximations here, they will fail!)
self.dec = ((decs - np.mean(decs)) / 3600.0 + dec).flatten()
self.ra = (ras - np.mean(ras)).flatten() / np.cos(self.dec * np.pi / 180.0) / 3600.0 + ra
# randomly nudge all of the stars (to prevent hitting same parts of pixels)
if randomizenudgesby > 0:
offset = randomizenudgesby * (np.random.rand(2, n) - 0.5) / 3600.0
self.dec += offset[0, :]
self.ra += offset[1, :] * np.cos(self.dec * np.pi / 180.0)
# draw the magnitudes of the stars totally randomly
if randomizemagnitudes:
self.tmag = np.random.uniform(np.min(magnitudes), np.max(magnitudes), n)
# make up some imaginary proper motions
if randomizepropermotionsby > 0:
self.pmra = np.random.normal(0, randomizepropermotionsby, n)
self.pmdec = np.random.normal(0, randomizepropermotionsby, n)
else:
self.pmra, self.pmdec = np.zeros(n), np.zeros(n)
self.epoch = 2018.0
self.temperature = 5800.0 + np.zeros_like(self.ra)
class UCAC4(Catalog):
def __init__(self, ra=0.0, dec=90.0,
radius=0.2,
write=True,
fast=False,
lckw=None, starsarevariable=True, faintlimit=None, **kwargs):
# initialize this catalog
Catalog.__init__(self)
if fast:
radius *= 0.1
self.load(ra=ra, dec=dec, radius=radius, write=write, faintlimit=faintlimit)
if starsarevariable:
self.addLCs(**lckw)
else:
self.addLCs(fractionofstarswithlc=0.0)
def load(self, ra=0.0, dec=90.0, radius=0.2, write=True, faintlimit=None):
# select the columns that should be downloaded from UCAC
catalog = 'UCAC4'
ratag = '_RAJ2000'
dectag = '_DEJ2000'
if catalog == 'UCAC4':
vcat = 'I/322A/out'
rmagtag = 'f.mag'
jmagtag = 'Jmag'
vmagtag = 'Vmag'
pmratag, pmdectag = 'pmRA', 'pmDE'
            columns = ['_RAJ2000', '_DEJ2000', 'pmRA', 'pmDE', 'f.mag', 'Jmag', 'Vmag', 'UCAC4']
# create a query through Vizier
v = Vizier(catalog=vcat, columns=columns)
v.ROW_LIMIT = -1
# either reload an existing catalog file or download to create a new one
starsfilename = settings.intermediates + self.directory
starsfilename += "{catalog}ra{ra:.4f}dec{dec:.4f}rad{radius:.4f}".format(
catalog=catalog,
ra=ra,
dec=dec,
radius=radius) + '.npy'
try:
# try to load a raw catalog file
logger.info("loading a catalog of stars from {0}".format(starsfilename))
t = np.load(starsfilename)
except IOError:
logger.info('could not load stars')
# otherwise, make a new query
logger.info("querying {catalog} "
"for ra = {ra}, dec = {dec}, radius = {radius}".format(
catalog=catalog, ra=ra, dec=dec, radius=radius))
# load via astroquery
t = v.query_region(astropy.coordinates.ICRS(ra=ra, dec=dec,
unit=(astropy.units.deg, astropy.units.deg)),
radius='{:f}d'.format(radius), verbose=True)[0]
# save the queried table
np.save(starsfilename, t)
# define the table
self.table = astropy.table.Table(t)
ras = np.array(t[:][ratag])
decs = np.array(t[:][dectag])
pmra = np.array(t[:][pmratag])
pmdec = np.array(t[:][pmdectag])
rmag = np.array(t[:][rmagtag])
jmag = np.array(t[:][jmagtag])
vmag = np.array(t[:][vmagtag])
rbad = (np.isfinite(rmag) == False) * (np.isfinite(vmag))
rmag[rbad] = vmag[rbad]
rbad = (np.isfinite(rmag) == False) * (np.isfinite(jmag))
rmag[rbad] = jmag[rbad]
jbad = (np.isfinite(jmag) == False) * (np.isfinite(vmag))
jmag[jbad] = vmag[jbad]
jbad = (np.isfinite(jmag) == False) * (np.isfinite(rmag))
jmag[jbad] = rmag[jbad]
vbad = (np.isfinite(vmag) == False) * (np.isfinite(rmag))
vmag[vbad] = rmag[vbad]
vbad = (np.isfinite(vmag) == False) * (np.isfinite(jmag))
vmag[vbad] = jmag[vbad]
temperatures = relations.pickles(rmag - jmag)
imag = rmag - relations.davenport(rmag - jmag)
pmra[np.isfinite(pmra) == False] = 0.0
pmdec[np.isfinite(pmdec) == False] = 0.0
ok = np.isfinite(imag)
if faintlimit is not None:
ok *= imag <= faintlimit
logger.info("found {0} stars with {1} < V < {2}".format(np.sum(ok), np.min(rmag[ok]), np.max(rmag[ok])))
self.ra = ras[ok]
self.dec = decs[ok]
self.pmra = pmra[ok]
self.pmdec = pmdec[ok]
self.tmag = imag[ok]
self.temperature = temperatures[ok]
self.epoch = 2000.0
class Trimmed(Catalog):
"""a trimmed catalog, created by removing elements from another catalog"""
def __init__(self, inputcatalog, keep):
"""inputcatalog = the catalog to start with
        keep = an array of indices indicating which elements of inputcatalog to use"""
Catalog.__init__(self)
# define the keys to propagate from old catalog to the new one
keystotransfer = ['ra', 'dec', 'pmra', 'pmdec', 'tmag', 'temperature', 'lightcurves']
for k in keystotransfer:
self.__dict__[k] = inputcatalog.__dict__[k][keep]
self.epoch = inputcatalog.epoch
| mit |
gotomypc/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mask the labels of the unlabeled points with -1
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
dbednarski/pyhdust | pyhdust/beatlas.py | 1 | 16742 | # -*- coding:utf-8 -*-
"""
PyHdust *beatlas* module: BeAtlas specific variables and functions.
BAmod class: there is no disk without a reference star.
BAstar clase: there is a stand-alone star.
:license: GNU GPL v3.0 (https://github.com/danmoser/pyhdust/blob/master/LICENSE)
"""
import os as _os
import numpy as _np
import struct as _struct
from glob import glob as _glob
from itertools import product as _product
import pyhdust.phc as _phc
import pyhdust as _hdt
try:
import matplotlib.pyplot as _plt
except ImportError:
print('# Warning! matplotlib module not installed!!!')
__author__ = "Daniel Moser"
__email__ = "[email protected]"
class BAstar(object):
""" BeAtlas source star filename structure.
See BAmod.
"""
def __init__(self, f0):
self.M = f0[f0.find('_M')+2:f0.find('_M')+7]
self.ob = f0[f0.find('_ob')+3:f0.find('_ob')+7]
self.Z = f0[f0.find('_Z')+2:f0.find('_Z')+7]
self.H = f0[f0.find('_H')+2:f0.find('_H')+6]
self.beta = f0[f0.find('_Z')+8:f0.find('_Z')+10]
self.shape = f0[f0.rfind('_')+1:f0.rfind('_')+4]
self._f0 = f0
#
def __repr__(self):
return self._f0
class BAmod(BAstar):
""" BeAtlas disk model filename structure.
It could be f0.split('_'), but the f0.find('_X') way was chosen.
See that the parameters sequence is not important for this reading (this
may not be the case of other routines). And, by definition, the source star
has a specific name added at the end of disk model name, starting with
'Be_'. """
def __init__ (self, f0):
""" Class initialiser """
BAstar.__init__(self, f0)
self.param = False
if f0.find('_PL') > -1:
self.n = f0[f0.find('_PLn')+4:f0.find('_PLn')+7]
self.param = True
self.sig = f0[f0.find('_sig')+4:f0.find('_sig')+8]
self.h = f0[f0.find('_h')+2:f0.find('_h')+5]
self.Rd = f0[f0.find('_Rd')+3:f0.find('_Rd')+8]
#
def build(self, ctrlarr, listpars):
""" Set full list of parameters. """
for i in range(len(ctrlarr)):
if i == 0:
self.M = _phc.find_nearest(listpars[i], ctrlarr[i])
if i == 1:
self.ob = _phc.find_nearest(listpars[i], ctrlarr[i])
if i == 2:
self.Z = _phc.find_nearest(listpars[i], ctrlarr[i])
if i == 3:
self.H = _phc.find_nearest(listpars[i], ctrlarr[i])
if i == 4:
self.sig = _phc.find_nearest(listpars[i], ctrlarr[i])
if i == 5:
self.Rd = _phc.find_nearest(listpars[i], ctrlarr[i])
if i == 6:
self.h = _phc.find_nearest(listpars[i], ctrlarr[i])
if len(listpars) == 9:
if i == 7:
self.n = _phc.find_nearest(listpars[i], ctrlarr[i])
self.param = True
if i == 8:
self.cosi = _phc.find_nearest(listpars[i], ctrlarr[i])
else:
if i == 7:
self.cosi = _phc.find_nearest(listpars[i], ctrlarr[i])
#
def getidx(self, minfo):
""" Find index of current model in minfo array. """
if len(minfo[0])==9:
self.idx = (minfo[:,0]==self.M) & (minfo[:,1]==self.ob) &\
(minfo[:,2]==self.Z) & (minfo[:,3]==self.H) & (minfo[:,4]==self.sig) &\
(minfo[:,5]==self.Rd) & (minfo[:,6]==self.h) & (minfo[:,7]==self.n) &\
(minfo[:,-1]==self.cosi)
else:
self.idx = (minfo[:,0]==self.M) & (minfo[:,1]==self.ob) &\
(minfo[:,2]==self.Z) & (minfo[:,3]==self.H) & (minfo[:,4]==self.sig) &\
(minfo[:,5]==self.Rd) & (minfo[:,6]==self.h) &\
(minfo[:,-1]==self.cosi)
return self.idx
vrots = [[259.759,354.834,417.792,464.549,483.847],\
[252.050,346.163,406.388,449.818,468.126],\
[245.127,336.834,399.983,448.076,467.806],\
[239.522,329.496,388.734,432.532,450.806],\
[234.301,321.139,379.297,423.241,441.122],\
[228.538,313.797,370.343,412.488,429.914],\
[219.126,299.656,354.547,395.821,413.008],\
[211.544,288.840,341.081,380.426,396.978],\
[203.438,279.328,328.666,365.697,380.660],\
[197.823,268.964,316.901,353.568,368.506],\
[192.620,262.688,308.208,341.963,356.410],\
[187.003,255.125,299.737,332.511,346.043]]
obs = [1.1,1.2,1.3,1.4,1.45]
ms = [14.6, 12.5, 10.8, 9.6, 8.6, 7.7, 6.4, 5.5, 4.8, 4.2, 3.8,3.4]
Ms = _np.array([14.6, 12.5, 10.8, 9.6, 8.6, 7.7, 6.4, 5.5, 4.8, 4.2, 3.8, 3.4],\
dtype=str)
Tp11 = _np.array([28905.8,26945.8,25085.2,23629.3,22296.1,20919.7,\
18739.3,17063.8,15587.7,14300.3,13329.9,12307.1])
sig0 = _np.logspace(_np.log10(0.02),_np.log10(4.0),7)
Sig0 = ['{0:.2f}'.format(x) for x in sig0]
ns = [3.0, 3.5, 4.0, 4.5]
def rmMods(modn, Ms, clusters=['job']):
"""
Remove the *.inp models of models `modn` according to the list structure
below.
    | Masses list and sig0 POSITIONS to be excluded
| Ms = [
| ['14.6', [0]],
| ['12.5', [0,-1]],
| ['10.8', [0,-1]],
| ['09.6', [0,-2,-1]],
| ['08.6', [0,-2,-1]],
| ['07.7', [0,-2,-1]],
| ['06.4', [0,-3,-2,-1]],
| ['05.5', [0,-3,-2,-1]],
| ['04.8', [-4,-3,-2,-1]],
| ['04.2', [-4,-3,-2,-1]],
| ['03.8', [-4,-3,-2,-1]],
| ['03.4', [-4,-3,-2,-1]],]
INPUT: string, structured list
OUTPUT: *files removed
"""
#Create sig0 list
sig0s = Sig0
project = _phc.trimpathname(_os.getcwd())[1]
for cl in clusters:
file = open('{0}s/{0}s_{1}_mod{2}.sh'.format(cl, project, modn))
lines = file.readlines()
file.close()
for item in Ms:
M = item[0]
exsig = item[1]
for rm in exsig:
_os.system('rm mod{0}/mod{0}*_sig{1}*_M{2}*.inp'.format(modn,
sig0s[rm], M))
print('# Deleted mod{0}/mod{0}*_sig{1}*_M{2}*.inp'.format(modn,
sig0s[rm],M))
_os.system('rm {3}s/mod{0}*_sig{1}*_M{2}*.{3}'.format(modn,
sig0s[rm], M, cl))
lines = [line for line in lines if (line.find('_sig{0}'.format(
sig0s[rm]))==-1 or line.find('_M{0}'.format(M))==-1)]
file = open('{0}s/{0}s_{1}_mod{2}.sh'.format(cl, project, modn), 'w')
file.writelines(lines)
file.close()
#End prog
return
def fsedList(fsedlist, param=True):
""" Return the total of models and the parameters values in the fullsed list.
The len of fsedlist is 9 (param=True) for the parametric case and 8
to the VDD-ST one.
The sequence is: M, ob(W), Z, H, sig, Rd, h, *n*, cos(i).
It is assumed that all models have the same `observers` configuration."""
nq = 9
if not param:
nq = 8
listpar = [[] for i in range(nq)]
nm = 0
for sed in fsedlist:
mod = BAmod(sed)
if mod.param == param:
nm += 1
if mod.M not in listpar[0]:
listpar[0].append(mod.M)
if mod.ob not in listpar[1]:
listpar[1].append(mod.ob)
if mod.Z not in listpar[2]:
listpar[2].append(mod.Z)
if mod.H not in listpar[3]:
listpar[3].append(mod.H)
if mod.sig not in listpar[4]:
listpar[4].append(mod.sig)
if mod.Rd not in listpar[5]:
listpar[5].append(mod.Rd)
if mod.h not in listpar[6]:
listpar[6].append(mod.h)
if param:
if mod.n not in listpar[7]:
listpar[7].append(mod.n)
if listpar[-1] == []:
sed2data = _hdt.readfullsed2(sed)
listpar[-1] = list(sed2data[:,0,0])
#
for vals in listpar:
vals.sort()
return nm*len(listpar[-1]), listpar
def createBAsed(fsedlist, xdrpath, lbdarr, param=True, savetxt=False,
ignorelum=False):
""" Create the BeAtlas SED XDR release.
| The file structure:
| -n_quantities, n_lbd, n_models,
| -n_qt_vals1, n_qt_vals2, .. n_qt_valsn
| -quantities values = M, ob(W), Z, H, sig, Rd, h, *n*, cos(i).
| -(Unique) lbd array
| -Loop:
| *model values
| *model SED
| Definitions:
| -photospheric models: sig0 = 0.00
| -Parametric disk model default (`param` == True)
| -VDD-ST models: n excluded (alpha and R0 fixed. Confirm?)
    | -The flux will be given in ergs/s/cm2/um. If ignorelum==True, the usual
| F_lbda/F_bol unit will be given.
Since the grid is not symmetric, there is no index to jump directly to the
desired model. So the suggestion is to use the index matrix, or read the
    file line by line until the model is found (if it exists).
"""
fsedlist.sort()
nq = 9
if not param:
nq = 8
nm, listpar = fsedList(fsedlist, param=param)
header2 = []
for vals in listpar:
header2 += [len(vals)]
nlb = len(lbdarr)
header1 = [nq, nlb, nm]
models = _np.zeros((nm, nlb))
minfo = _np.zeros((nm, nq))
k = 0
for i in range(len(fsedlist)):
mod = BAmod(fsedlist[i])
#~ Select only `param` matching cases:
if mod.param == param:
sed2data = _hdt.readfullsed2(fsedlist[i])
iL = 1.
dist = _np.sqrt(4*_np.pi)
if not ignorelum:
j = fsedlist[i].find('fullsed_mod')
modn = fsedlist[i][j+11:j+13]
log = fsedlist[i].replace('fullsed_mod','../mod{0}/mod'.format(modn)).\
replace('.sed2','.log')
if not _os.path.exists(log):
log = _glob(log.replace('../mod{0}/mod'.format(modn),
'../mod{0}/*mod'.format(modn)))
if len(log) >= 1:
log = log[0]
else:
print('# ERROR! No log file found for {0}'.format(fsedlist[i]))
raise SystemExit(0)
f0 = open(log)
lines = f0.readlines()
f0.close()
iL = _phc.fltTxtOccur('L =', lines, seq=2)*_phc.Lsun.cgs
dist = 10.*_phc.pc.cgs
for j in range(header2[-1]):
#~ M, ob(W), Z, H, sig, Rd, h, *n*, cos(i).
if param:
minfo[k*header2[-1]+j] = _np.array([ mod.M, mod.ob, mod.Z, mod.H,
mod.sig, mod.Rd, mod.h, mod.n, listpar[-1][j] ]).astype(float)
else:
minfo[k*header2[-1]+j] = _np.array([ mod.M, mod.ob, mod.Z, mod.H,
mod.sig, mod.Rd, mod.h, listpar[-1][j] ]).astype(float)
if len(sed2data[j,:,2]) != nlb:
models[k*header2[-1]+j] = _np.interp(lbdarr, sed2data[j,:,2],
sed2data[j,:,3])*iL/4/_np.pi/dist**2
else:
models[k*header2[-1]+j] = sed2data[j,:,3]*iL/4/_np.pi/dist**2
k += 1
#
f0 = open(xdrpath, 'w')
stfmt = '>{0}l'.format(3)
f0.writelines(_struct.pack(stfmt, *header1))
stfmt = '>{0}l'.format(nq)
f0.writelines(_struct.pack(stfmt, *header2))
for vals in listpar:
stfmt = '>{0}f'.format(len(vals))
f0.writelines(_struct.pack(stfmt, *_np.array(vals).astype(float)))
stfmt = '>{0}f'.format(nlb)
f0.writelines(_struct.pack(stfmt, *_np.array(lbdarr).astype(float)))
for i in range(nm):
stfmt = '>{0}f'.format(nq)
f0.writelines(_struct.pack(stfmt, *minfo[i]))
stfmt = '>{0}f'.format(nlb)
f0.writelines(_struct.pack(stfmt, *_np.array(models[i]).astype(float)))
f0.close()
print('# XDR file {0} saved!'.format(xdrpath))
if savetxt:
f0 = open(xdrpath+'.txt', 'w')
f0.writelines('{} \n'.format(header1))
f0.writelines('{} \n'.format(header2))
for vals in listpar:
f0.writelines('{} \n'.format(vals))
f0.writelines('{} \n'.format(lbdarr))
for i in range(nm):
f0.writelines('{} \n'.format(minfo[i]))
f0.writelines('{} \n'.format(models[i]))
f0.close()
print('# TXT file {0} saved!'.format(xdrpath+'.txt'))
return
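# Illustrative sketch (not part of the original module): assembling an XDR
# release from a directory of fullsed files. The directory, output name and
# wavelength range below are assumptions for demonstration only.
def _exampleCreateXDR(fseddir='fullsed/', xdrout='BeAtlas.xdr'):
    """Glob the fullsed files, report the grid size and write the
    parametric-disk XDR release (hypothetical helper)."""
    fsedlist = _glob(fseddir + 'fullsed_mod*.sed2')
    nm, listpar = fsedList(fsedlist, param=True)
    print('# {0} parametric models found'.format(nm))
    # assumed log-spaced wavelength grid, in microns
    lbdarr = _np.logspace(_np.log10(0.37), _np.log10(100.), 100)
    createBAsed(fsedlist, xdrout, lbdarr, param=True, savetxt=False)
    return xdrout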
def readBAsed(xdrpath, quiet=False):
""" Read the BeAtlas SED release.
| Definitions:
| -photospheric models: sig0 (and other quantities) == 0.00
| -Parametric disk model default (`param` == True)
| -VDD-ST models: n excluded (alpha and R0 fixed. Confirm?)
    | -The model fluxes are given in ergs/s/cm2/um. If ignorelum==True in the
| XDR creation, F_lbda/F_bol unit will be given.
INPUT: xdrpath
| OUTPUT: listpar, lbdarr, minfo, models
| (list of mods parameters, lambda array (um), mods index, mods flux)
"""
f = open(xdrpath).read()
ixdr=0
#~
npxs = 3
upck = '>{0}l'.format(npxs)
header = _np.array(_struct.unpack(upck, f[ixdr:ixdr+npxs*4]) )
ixdr+=npxs*4
nq, nlb, nm = header
#~
npxs = nq
upck = '>{0}l'.format(npxs)
header = _np.array(_struct.unpack(upck, f[ixdr:ixdr+npxs*4]) )
ixdr+=npxs*4
#~
listpar = [[] for i in range(nq)]
for i in range(nq):
npxs = header[i]
upck = '>{0}f'.format(npxs)
listpar[i] = _np.array(_struct.unpack(upck, f[ixdr:ixdr+npxs*4]) )
ixdr+=npxs*4
#~
npxs = nlb
upck = '>{0}f'.format(npxs)
lbdarr = _np.array(_struct.unpack(upck, f[ixdr:ixdr+npxs*4]) )
ixdr+=npxs*4
#~
npxs = nm*(nq+nlb)
upck = '>{0}f'.format(npxs)
models = _np.array(_struct.unpack(upck, f[ixdr:ixdr+npxs*4]) )
ixdr+=npxs*4
models = models.reshape((nm,-1))
#this will check if the XDR is finished.
if ixdr == len(f):
if not quiet:
print('# XDR {0} completely read!'.format(xdrpath))
else:
print('# Warning: XDR {0} not completely read!'.format(xdrpath))
print('# length difference is {0}'.format( (len(f)-ixdr)/4 ) )
#~
return listpar, lbdarr, models[:,0:nq], models[:,nq:]
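# Illustrative sketch (not part of the original module): reading a release back
# and inspecting its grid. The filename is an assumption.
def _exampleReadXDR(xdrpath='BeAtlas.xdr'):
    """Read an XDR release and print its dimensions (hypothetical helper)."""
    listpar, lbdarr, minfo, models = readBAsed(xdrpath, quiet=True)
    print('# {0} models x {1} wavelengths'.format(len(models), len(lbdarr)))
    print('# grid values per parameter: {0}'.format([len(p) for p in listpar]))
    return listpar, lbdarr, minfo, models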
def interpolBA(params, ctrlarr, lparams, minfo, models, param=True):
""" Interpola os `modelos` para os parametros `params`
| -params = from emcee minimization
| -ctrlarr = the fixed value of M, ob(W), Z, H, sig, Rd, h, *n*, cos(i).
| If it is not fixed, use np.NaN.
| -Parametric disk model default (`param` == True).
This function always returns a valid result (i.e., extrapolations from the
nearest values are always on).
If it is a 'Non-squared grid' (asymmetric), it will return a zero array if
a given model is not found.
"""
nq = 9
if not param:
nq = 8
if len(ctrlarr) != nq:
print('# ERROR in ctrlarr!!')
return
params = params[:_np.sum(_np.isnan(ctrlarr))]
nlb = len(models[0])
outmodels = _np.empty((2**len(params),nlb))
mod = BAmod('')
parlims = _np.zeros((len(params), 2))
j = 0
for i in range(nq):
if ctrlarr[i] is _np.NaN:
parlims[j] = [_phc.find_nearest(lparams[i], params[j], bigger=False),
_phc.find_nearest(lparams[i], params[j], bigger=True)]
j+= 1
j = 0
for prod in _product(*parlims):
allpars = _np.array(ctrlarr)
idx = _np.isnan(allpars)
allpars[idx] = prod
mod.build(allpars, lparams)
idx = mod.getidx(minfo)
if _np.sum(idx) == 0:
return _np.zeros(nlb)
outmodels[j] = models[idx]
j+= 1
X0 = parlims[:,0]
X1 = parlims[:,1]
return _phc.interLinND(params, X0, X1, outmodels)
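# Illustrative sketch (not part of the original module): interpolating one SED
# from the grid. Every numerical value below is an assumption; the np.NaN
# entries mark the free parameters, as documented above.
def _exampleInterpol(xdrpath='BeAtlas.xdr'):
    """Interpolate a model SED for a target parameter set (hypothetical helper)."""
    listpar, lbdarr, minfo, models = readBAsed(xdrpath, quiet=True)
    # fixed: Z, H, Rd, h; free (np.NaN): M, ob(W), sig, n, cos(i)
    ctrlarr = [_np.NaN, _np.NaN, 0.014, 0.30, _np.NaN, 18.6, 0.72, _np.NaN,
               _np.NaN]
    params = [10.8, 1.30, 0.28, 3.5, 0.50]
    sed = interpolBA(params, ctrlarr, listpar, minfo, models, param=True)
    return lbdarr, sed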
def breakJob(n, file):
""" Break the jobs/jobs_Project_modn.sh into n files
../jobs_Project_modn_##.txt to be used with `dispara` """
f0 = open(file)
lines = f0.readlines()
f0.close()
lines.sort()
lines = [line.replace('qsub ','') for line in lines]
outname = _phc.trimpathname(file)[1].replace('.sh','')
N = len(lines)
for i in range(n):
f0 = open('{0}_{1:02d}.txt'.format(outname, i), 'w')
f0.writelines(lines[i*N/n:(i+1)*N/n])
f0.close()
print('# {0} files created!'.format(n))
return
def correltable(pos):
""" Create the correlation table of Domiciano de Souza+ 2014. """
nwalkers = len(pos)
ndim = len(pos[0])
fig = _plt.figure()
for i in range(ndim**2):
ax = fig.add_subplot(ndim,ndim,i+1)
if i+1 in [ 1+x*(ndim+1) for x in range(ndim) ]:
ax.hist(pos[:,i/ndim], 20)
else:
ax.plot(pos[:,i/ndim], pos[:,i%ndim], 'o', markersize=2)
    _plt.savefig('correl.png', transparent=True)
_plt.close()
print('# Figure "correl.png" saved!')
return
### MAIN ###
if __name__ == "__main__":
pass
| gpl-3.0 |
vascotenner/holoviews | holoviews/element/tabular.py | 1 | 5921 | import numpy as np
import param
from ..core import OrderedDict, Dimension, Element, Dataset, Tabular
class ItemTable(Element):
"""
A tabular element type to allow convenient visualization of either
a standard Python dictionary, an OrderedDict or a list of tuples
(i.e. input suitable for an OrderedDict constructor). If an
OrderedDict is used, the headings will be kept in the correct
order. Tables store heterogeneous data with different labels.
Dimension objects are also accepted as keys, allowing dimensional
information (e.g type and units) to be associated per heading.
"""
kdims = param.List(default=[], bounds=(0, 0), doc="""
ItemTables hold an index Dimension for each value they contain, i.e.
they are equivalent to the keys.""")
vdims = param.List(default=[Dimension('Default')], bounds=(1, None), doc="""
ItemTables should have only index Dimensions.""")
group = param.String(default="ItemTable", constant=True)
@property
def rows(self):
return len(self.vdims)
@property
def cols(self):
return 2
def __init__(self, data, **params):
if type(data) == dict:
raise ValueError("ItemTable cannot accept a standard Python dictionary "
"as a well-defined item ordering is required.")
elif isinstance(data, dict): pass
elif isinstance(data, list):
data = OrderedDict(data)
else:
data = OrderedDict(list(data)) # Python 3
if not 'vdims' in params:
params['vdims'] = list(data.keys())
str_keys = OrderedDict((k.name if isinstance(k, Dimension)
else k ,v) for (k,v) in data.items())
super(ItemTable, self).__init__(str_keys, **params)
def __getitem__(self, heading):
"""
Get the value associated with the given heading (key).
"""
        if heading == ():
return self
if heading not in self.vdims:
raise KeyError("%r not in available headings." % heading)
return np.array(self.data.get(heading, np.NaN))
@classmethod
def collapse_data(cls, data, function, **kwargs):
groups = np.vstack([np.array(odict.values()) for odict in data]).T
return OrderedDict(zip(data[0].keys(), function(groups, axis=-1, **kwargs)))
def dimension_values(self, dimension, expanded=True, flat=True):
dimension = self.get_dimension(dimension, strict=True).name
if dimension in self.dimensions('value', label=True):
return np.array([self.data.get(dimension, np.NaN)])
else:
return super(ItemTable, self).dimension_values(dimension)
def sample(self, samples=[]):
if callable(samples):
sampled_data = OrderedDict(item for item in self.data.items()
if samples(item))
else:
sampled_data = OrderedDict((s, self.data.get(s, np.NaN)) for s in samples)
return self.clone(sampled_data)
def reduce(self, dimensions=None, function=None, **reduce_map):
        raise NotImplementedError('ItemTables are for heterogeneous data, which '
'cannot be reduced.')
def pprint_cell(self, row, col):
"""
Get the formatted cell value for the given row and column indices.
"""
if col > 2:
raise Exception("Only two columns available in a ItemTable.")
elif row >= self.rows:
raise Exception("Maximum row index is %d" % self.rows-1)
elif col == 0:
return str(self.dimensions('value')[row])
else:
dim = self.get_dimension(row)
heading = self.vdims[row]
return dim.pprint_value(self.data.get(heading.name, np.NaN))
def hist(self, *args, **kwargs):
raise NotImplementedError("ItemTables are not homogenous and "
"don't support histograms.")
def cell_type(self, row, col):
"""
Returns the cell type given a row and column index. The common
basic cell types are 'data' and 'heading'.
"""
if col == 0: return 'heading'
else: return 'data'
def dframe(self):
"""
Generates a Pandas dframe from the ItemTable.
"""
from pandas import DataFrame
return DataFrame({(k.name if isinstance(k, Dimension)
else k): [v] for k, v in self.data.items()})
def table(self, datatype=None):
return Table(OrderedDict([((), self.values())]), kdims=[],
vdims=self.vdims)
def values(self):
return tuple(self.data.get(d.name, np.NaN)
for d in self.vdims)
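# A small illustrative construction (not part of the original module); the
# labels and values below are arbitrary assumptions.
def _example_itemtable():
    "Build an ItemTable from ordered (label, value) pairs (hypothetical helper)."
    table = ItemTable([('Mass', 1.2), ('Radius', 3400.0)], group='Planet')
    # each heading becomes a value dimension and can be indexed by name
    return table['Mass'], table.values()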
class Table(Dataset, Tabular):
"""
Table is an NdElement type, which gets displayed in a tabular
format and is convertible to most other Element types.
"""
group = param.String(default='Table', constant=True, doc="""
The group is used to describe the Table.""")
def _add_item(self, key, value, sort=True):
if self.indexed and ((key != len(self)) and (key != (len(self),))):
raise Exception("Supplied key %s does not correspond to the items row number." % key)
if isinstance(value, (dict, OrderedDict)):
if all(isinstance(k, str) for k in key):
value = ItemTable(value)
else:
raise ValueError("Tables only supports string inner"
"keys when supplied nested dictionary")
if isinstance(value, ItemTable):
if value.vdims != self.vdims:
raise Exception("Input ItemTables dimensions must match value dimensions.")
value = value.data.values()
super(Table, self)._add_item(key, value, sort)
| bsd-3-clause |
elijah513/scikit-learn | sklearn/utils/extmath.py | 142 | 21102 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
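# Small illustrative check (not part of this module's public API): on a dense
# array, row_norms agrees with the naive NumPy expression.
def _example_row_norms():
    X = np.arange(12, dtype=np.float64).reshape(4, 3)
    expected = np.sqrt((X * X).sum(axis=1))
    assert np.allclose(row_norms(X), expected)
    return row_norms(X, squared=True)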
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
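# Small illustrative check (not part of this module's public API): for a
# symmetric positive-definite matrix, fast_logdet matches log(det(A)).
def _example_fast_logdet():
    rng = np.random.RandomState(0)
    A = rng.randn(5, 5)
    A = np.dot(A, A.T) + 5 * np.eye(5)  # symmetric positive definite
    assert np.allclose(fast_logdet(A), np.log(np.linalg.det(A)))
    return fast_logdet(A)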
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
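# Illustrative usage (not part of this module's public API): safe_sparse_dot
# accepts a sparse left operand and a dense right operand transparently.
def _example_safe_sparse_dot():
    from scipy import sparse
    rng = np.random.RandomState(0)
    A = sparse.rand(20, 10, density=0.1, format='csr')
    B = rng.randn(10, 3)
    C = safe_sparse_dot(A, B, dense_output=True)
    return C.shape  # (20, 3)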
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sampling the range of A using by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case).
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
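# Illustrative usage (not part of this module's public API): an exactly rank-5
# matrix is recovered to numerical precision from its truncated randomized SVD.
def _example_randomized_svd():
    rng = np.random.RandomState(0)
    M = np.dot(rng.randn(50, 5), rng.randn(5, 30))  # exactly rank 5
    U, s, V = randomized_svd(M, n_components=5, n_iter=3, random_state=0)
    assert np.allclose(M, np.dot(U * s, V), atol=1e-6)
    return s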
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.5.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
Real symmetric or complex hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Eigenvalues smaller than cond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
The pseudo-inverse of the matrix a.
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
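# Illustrative sketch for svd_flip: the per-component signs become
# deterministic while the reconstructed product is left unchanged (relies on
# the numpy/scipy imports already used in this module):
def _svd_flip_example():
    a = np.random.RandomState(42).randn(6, 4)
    u, s, v = linalg.svd(a, full_matrices=False)
    u2, v2 = svd_flip(u.copy(), v.copy())  # copies: svd_flip works in place
    return np.allclose(np.dot(u * s, v), np.dot(u2 * s, v2))  # True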
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
out: array-like, shape: (M, N), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N)
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = check_array(X, dtype=np.float)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
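# Quick illustrative check for log_logistic: for moderate inputs the stable
# split formulation above agrees with the naive formula (2-D input keeps the
# example independent of how 1-D arrays are validated):
def _log_logistic_example():
    x = np.array([[-10.0, 0.0, 10.0]])
    naive = np.log(1.0 / (1.0 + np.exp(-x)))
    return np.allclose(log_logistic(x), naive)  # True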
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
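# Tiny illustration of the dense case (sparse input raises, as explained in
# the error message above):
def _make_nonnegative_example():
    X = np.array([[-2.0, 1.0], [0.0, 3.0]])
    return make_nonnegative(X)  # [[0., 3.], [2., 5.]], minimum is now 0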
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
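# Sketch of how the incremental update can be checked against a plain batch
# computation (population variance, ddof=0; the old count is passed as a
# float so the example does not depend on true division):
def _batch_mean_variance_update_example():
    rng = np.random.RandomState(0)
    A, B = rng.randn(20, 3), rng.randn(30, 3)
    mean, var, count = _batch_mean_variance_update(
        B, A.mean(axis=0), A.var(axis=0), float(A.shape[0]))
    full = np.vstack((A, B))
    return (np.allclose(mean, full.mean(axis=0)) and
            np.allclose(var, full.var(axis=0)) and count == 50)  # True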
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
timeyyy/PyUpdater | pyupdater/vendor/PyInstaller/hooks/hookutils.py | 9 | 22893 | #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import glob
import os
import sys
import PyInstaller
import PyInstaller.compat as compat
from PyInstaller.compat import is_darwin, is_win
from PyInstaller.utils import misc
import PyInstaller.log as logging
logger = logging.getLogger(__name__)
# Some hooks need to save some values. This is the dict that can be used for
# that.
#
# When running tests this variable should be reset before every test.
#
# For example the 'wx' module needs variable 'wxpubsub'. This tells PyInstaller
# which protocol of the wx module should be bundled.
hook_variables = {}
def __exec_python_cmd(cmd):
"""
Executes an externally spawned Python interpreter and returns
anything that was emitted in the standard output as a single
string.
"""
# Prepend PYTHONPATH with pathex
pp = os.pathsep.join(PyInstaller.__pathex__)
old_pp = compat.getenv('PYTHONPATH')
if old_pp:
pp = os.pathsep.join([old_pp, pp])
compat.setenv("PYTHONPATH", pp)
try:
try:
txt = compat.exec_python(*cmd)
except OSError, e:
raise SystemExit("Execution failed: %s" % e)
finally:
if old_pp is not None:
compat.setenv("PYTHONPATH", old_pp)
else:
compat.unsetenv("PYTHONPATH")
return txt.strip()
def exec_statement(statement):
"""Executes a Python statement in an externally spawned interpreter, and
returns anything that was emitted in the standard output as a single string.
"""
cmd = ['-c', statement]
return __exec_python_cmd(cmd)
def exec_script(script_filename, *args):
"""
Executes a Python script in an externally spawned interpreter, and
returns anything that was emitted in the standard output as a
single string.
To prevent misuse, the script passed to hookutils.exec_script
must be located in the `hooks/utils` directory.
"""
script_filename = os.path.join('utils', os.path.basename(script_filename))
script_filename = os.path.join(os.path.dirname(__file__), script_filename)
if not os.path.exists(script_filename):
raise SystemError("To prevent missuse, the script passed to "
"hookutils.exec-script must be located in "
"the `hooks/utils` directory.")
# Scripts might be importing some modules. Add PyInstaller code to pathex.
pyinstaller_root_dir = os.path.dirname(os.path.abspath(PyInstaller.__path__[0]))
PyInstaller.__pathex__.append(pyinstaller_root_dir)
cmd = [script_filename]
cmd.extend(args)
return __exec_python_cmd(cmd)
def eval_statement(statement):
txt = exec_statement(statement).strip()
if not txt:
# return an empty string which is "not true" but iterable
return ''
return eval(txt)
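# Illustrative use of the helpers above; note that they spawn a separate
# Python (2.x) interpreter, hence the print-statement syntax in the string:
def _example_eval_statement():
    # Returns the spawned interpreter's module search path as a Python list.
    return eval_statement('import sys; print sys.path')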
def eval_script(scriptfilename, *args):
txt = exec_script(scriptfilename, *args).strip()
if not txt:
# return an empty string which is "not true" but iterable
return ''
return eval(txt)
def get_pyextension_imports(modname):
"""
Return list of modules required by binary (C/C++) Python extension.
Python extension files end with .so (Unix) or .pyd (Windows).
It's almost impossible to analyze a binary extension and its dependencies,
and the module cannot be imported directly.
Let's at least try to import it in a subprocess and get the difference
in the module list from sys.modules.
This function could be used for 'hiddenimports' in PyInstaller hooks files.
"""
statement = """
import sys
# Importing distutils filters common modules, especially in virtualenv.
import distutils
original_modlist = sys.modules.keys()
# When importing this module - sys.modules gets updated.
import %(modname)s
all_modlist = sys.modules.keys()
diff = set(all_modlist) - set(original_modlist)
# The module list contains the original modname. We do not need it there.
diff.discard('%(modname)s')
# Print module list to stdout.
print list(diff)
""" % {'modname': modname}
module_imports = eval_statement(statement)
if not module_imports:
logger.error('Cannot find imports for module %s' % modname)
return [] # Means no imports found or looking for imports failed.
#module_imports = filter(lambda x: not x.startswith('distutils'), module_imports)
return module_imports
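# Sketch of the typical use in a hook file for a binary extension
# ('_socket' is only a placeholder extension module name):
def _example_extension_hiddenimports():
    return get_pyextension_imports('_socket')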
def qt4_plugins_dir():
qt4_plugin_dirs = eval_statement(
"from PyQt4.QtCore import QCoreApplication;"
"app=QCoreApplication([]);"
"print map(unicode,app.libraryPaths())")
if not qt4_plugin_dirs:
logger.error("Cannot find PyQt4 plugin directories")
return ""
for d in qt4_plugin_dirs:
if os.path.isdir(d):
return str(d) # must be 8-bit chars for one-file builds
logger.error("Cannot find existing PyQt4 plugin directory")
return ""
def qt4_phonon_plugins_dir():
qt4_plugin_dirs = eval_statement(
"from PyQt4.QtGui import QApplication;"
"app=QApplication([]); app.setApplicationName('pyinstaller');"
"from PyQt4.phonon import Phonon;"
"v=Phonon.VideoPlayer(Phonon.VideoCategory);"
"print map(unicode,app.libraryPaths())")
if not qt4_plugin_dirs:
logger.error("Cannot find PyQt4 phonon plugin directories")
return ""
for d in qt4_plugin_dirs:
if os.path.isdir(d):
return str(d) # must be 8-bit chars for one-file builds
logger.error("Cannot find existing PyQt4 phonon plugin directory")
return ""
def qt4_plugins_binaries(plugin_type):
"""Return list of dynamic libraries formatted for mod.binaries."""
binaries = []
pdir = qt4_plugins_dir()
files = misc.dlls_in_dir(os.path.join(pdir, plugin_type))
for f in files:
binaries.append((
os.path.join('qt4_plugins', plugin_type, os.path.basename(f)),
f, 'BINARY'))
return binaries
def qt4_menu_nib_dir():
"""Return path to Qt resource dir qt_menu.nib. OSX only"""
menu_dir = ''
# Detect MacPorts prefix (usually /opt/local).
# Suppose that PyInstaller is using python from macports.
macports_prefix = sys.executable.split('/Library')[0]
# list of directories where to look for qt_menu.nib
dirs = []
# If PyQt4 is built against Qt5 look for the qt_menu.nib in a user
# specified location, if it exists.
if 'QT5DIR' in os.environ:
dirs.append(os.path.join(os.environ['QT5DIR'],
"src", "plugins", "platforms", "cocoa"))
dirs += [
# Qt4 from MacPorts not compiled as framework.
os.path.join(macports_prefix, 'lib', 'Resources'),
# Qt4 from MacPorts compiled as framework.
os.path.join(macports_prefix, 'libexec', 'qt4-mac', 'lib',
'QtGui.framework', 'Versions', '4', 'Resources'),
# Qt4 installed into default location.
'/Library/Frameworks/QtGui.framework/Resources',
'/Library/Frameworks/QtGui.framework/Versions/4/Resources',
'/Library/Frameworks/QtGui.Framework/Versions/Current/Resources',
]
# Qt4 from Homebrew compiled as framework
globpath = '/usr/local/Cellar/qt/4.*/lib/QtGui.framework/Versions/4/Resources'
qt_homebrew_dirs = glob.glob(globpath)
dirs += qt_homebrew_dirs
# Check directory existence
for d in dirs:
d = os.path.join(d, 'qt_menu.nib')
if os.path.exists(d):
menu_dir = d
break
if not menu_dir:
logger.error('Cannot find qt_menu.nib directory')
return menu_dir
def qt5_plugins_dir():
qt5_plugin_dirs = eval_statement(
"from PyQt5.QtCore import QCoreApplication;"
"app=QCoreApplication([]);"
"print map(unicode,app.libraryPaths())")
if not qt5_plugin_dirs:
logger.error("Cannot find PyQt5 plugin directories")
return ""
for d in qt5_plugin_dirs:
if os.path.isdir(d):
return str(d) # must be 8-bit chars for one-file builds
logger.error("Cannot find existing PyQt5 plugin directory")
return ""
def qt5_phonon_plugins_dir():
qt5_plugin_dirs = eval_statement(
"from PyQt5.QtGui import QApplication;"
"app=QApplication([]); app.setApplicationName('pyinstaller');"
"from PyQt5.phonon import Phonon;"
"v=Phonon.VideoPlayer(Phonon.VideoCategory);"
"print map(unicode,app.libraryPaths())")
if not qt5_plugin_dirs:
logger.error("Cannot find PyQt5 phonon plugin directories")
return ""
for d in qt5_plugin_dirs:
if os.path.isdir(d):
return str(d) # must be 8-bit chars for one-file builds
logger.error("Cannot find existing PyQt5 phonon plugin directory")
return ""
def qt5_plugins_binaries(plugin_type):
"""Return list of dynamic libraries formatted for mod.binaries."""
binaries = []
pdir = qt5_plugins_dir()
files = misc.dlls_in_dir(os.path.join(pdir, plugin_type))
for f in files:
binaries.append((
os.path.join('qt5_plugins', plugin_type, os.path.basename(f)),
f, 'BINARY'))
return binaries
def qt5_menu_nib_dir():
"""Return path to Qt resource dir qt_menu.nib. OSX only"""
menu_dir = ''
# If the QT5DIR env var is set then look there first. It should be set to the
# qtbase dir in the Qt5 distribution.
dirs = []
if 'QT5DIR' in os.environ:
dirs.append(os.path.join(os.environ['QT5DIR'],
"src", "plugins", "platforms", "cocoa"))
# As of the time of writing macports doesn't yet support Qt5. So this is
# just modified from the Qt4 version.
# FIXME: update this when MacPorts supports Qt5
# Detect MacPorts prefix (usually /opt/local).
# Suppose that PyInstaller is using python from macports.
macports_prefix = sys.executable.split('/Library')[0]
# list of directories where to look for qt_menu.nib
dirs.extend( [
# Qt5 from MacPorts not compiled as framework.
os.path.join(macports_prefix, 'lib', 'Resources'),
# Qt5 from MacPorts compiled as framework.
os.path.join(macports_prefix, 'libexec', 'qt5-mac', 'lib',
'QtGui.framework', 'Versions', '5', 'Resources'),
# Qt5 installed into default location.
'/Library/Frameworks/QtGui.framework/Resources',
'/Library/Frameworks/QtGui.framework/Versions/5/Resources',
'/Library/Frameworks/QtGui.Framework/Versions/Current/Resources',
])
# Copied verbatim from the Qt4 version with 4 changed to 5
# Qt5 from Homebrew compiled as framework
globpath = '/usr/local/Cellar/qt/5.*/lib/QtGui.framework/Versions/5/Resources'
qt_homebrew_dirs = glob.glob(globpath)
dirs += qt_homebrew_dirs
# Check directory existence
for d in dirs:
d = os.path.join(d, 'qt_menu.nib')
if os.path.exists(d):
menu_dir = d
break
if not menu_dir:
logger.error('Cannot find qt_menu.nib directory')
return menu_dir
def qt5_qml_dir():
import subprocess
qmldir = subprocess.check_output(["qmake", "-query",
"QT_INSTALL_QML"]).strip()
if len(qmldir) == 0:
logger.error('Cannot find QT_INSTALL_QML directory, "qmake -query '
+ 'QT_INSTALL_QML" returned nothing')
if not os.path.exists(qmldir):
logger.error("Directory QT_INSTALL_QML: %s doesn't exist" % qmldir)
# On Windows 'qmake -query' uses / as the path separator
# so change it to \\.
if is_win:
import string
qmldir = string.replace(qmldir, '/', '\\')
return qmldir
def qt5_qml_data(dir):
"""Return Qml library dir formatted for data"""
qmldir = qt5_qml_dir()
return (os.path.join(qmldir, dir), 'qml')
def qt5_qml_plugins_binaries(dir):
"""Return list of dynamic libraries formatted for mod.binaries."""
import string
binaries = []
qmldir = qt5_qml_dir()
dir = string.rstrip(dir, os.sep)
files = misc.dlls_in_subdirs(os.path.join(qmldir, dir))
if files is not None:
for f in files:
relpath = string.lstrip(f, qmldir)
instdir, file = os.path.split(relpath)
instdir = os.path.join("qml", instdir)
logger.debug("qt5_qml_plugins_binaries installing %s in %s"
% (f, instdir) )
binaries.append((
os.path.join(instdir, os.path.basename(f)),
f, 'BINARY'))
return binaries
def django_dottedstring_imports(django_root_dir):
"""
Get all the necessary Django modules specified in settings.py.
In the settings.py the modules are specified in several variables
as strings.
"""
package_name = os.path.basename(django_root_dir)
compat.setenv('DJANGO_SETTINGS_MODULE', '%s.settings' % package_name)
# Extend PYTHONPATH with parent dir of django_root_dir.
PyInstaller.__pathex__.append(misc.get_path_to_toplevel_modules(django_root_dir))
# Extend PYTHONPATH with django_root_dir.
# Many times Django users do not specify absolute imports in the settings module.
PyInstaller.__pathex__.append(django_root_dir)
ret = eval_script('django-import-finder.py')
# Unset environment variables again.
compat.unsetenv('DJANGO_SETTINGS_MODULE')
return ret
def django_find_root_dir():
"""
Return path to directory (top-level Python package) that contains main django
files. Return None if no directory was detected.
Main Django project directory contain files like '__init__.py', 'settings.py'
and 'url.py'.
In Django 1.4+ the script 'manage.py' is not in the directory with 'settings.py'
but usually one level up. We need to detect this special case too.
"""
# Get the directory with manage.py. Manage.py is supplied to PyInstaller as the
# first main executable script.
manage_py = sys._PYI_SETTINGS['scripts'][0]
manage_dir = os.path.dirname(os.path.abspath(manage_py))
# Get the Django root directory: the directory that contains settings.py and urls.py.
# It could be the directory containing manage.py or any of its subdirectories.
settings_dir = None
files = set(os.listdir(manage_dir))
if 'settings.py' in files and 'urls.py' in files:
settings_dir = manage_dir
else:
for f in files:
if os.path.isdir(f):
subfiles = os.listdir(os.path.join(manage_dir, f))
# Subdirectory contains critical files.
if 'settings.py' in subfiles and 'urls.py' in subfiles:
settings_dir = os.path.join(manage_dir, f)
break # Find the first directory.
return settings_dir
def matplotlib_backends():
"""
Return matplotlib backends available in the current Python installation.
All matplotlib backends are hardcoded. We have to try import them
and return the list of successfully imported backends.
"""
all_bk = eval_statement('import matplotlib; print matplotlib.rcsetup.all_backends')
avail_bk = []
import_statement = """
try:
__import__('matplotlib.backends.backend_%s')
except ImportError, e:
print str(e)
"""
# CocoaAgg backend causes subprocess to exit and thus detection
# is not reliable. This backend is meaningful only on Mac OS X.
if not is_darwin and 'CocoaAgg' in all_bk:
all_bk.remove('CocoaAgg')
# Try to import every backend in a subprocess.
for bk in all_bk:
stdout = exec_statement(import_statement % bk.lower())
# Backend import is successful if there is no text in stdout.
if not stdout:
avail_bk.append(bk)
# Convert backend name to module name.
# e.g. GTKAgg -> backend_gtkagg
return ['backend_' + x.lower() for x in avail_bk]
def opengl_arrays_modules():
"""
Return list of array modules for OpenGL module.
e.g. 'OpenGL.arrays.vbo'
"""
statement = 'import OpenGL; print OpenGL.__path__[0]'
opengl_mod_path = PyInstaller.hooks.hookutils.exec_statement(statement)
arrays_mod_path = os.path.join(opengl_mod_path, 'arrays')
files = glob.glob(arrays_mod_path + '/*.py')
modules = []
for f in files:
mod = os.path.splitext(os.path.basename(f))[0]
# Skip __init__ module.
if mod == '__init__':
continue
modules.append('OpenGL.arrays.' + mod)
return modules
def remove_prefix(string, prefix):
"""
This function removes the given prefix from a string, if the string does
indeed begin with the prefix; otherwise, it returns the string
unmodified.
"""
if string.startswith(prefix):
return string[len(prefix):]
else:
return string
def remove_suffix(string, suffix):
"""
This function removes the given suffix from a string, if the string
does indeed end with the suffix; otherwise, it returns the string
unmodified.
"""
# Special case: if suffix is empty, string[:0] returns ''. So, test
# for a non-empty suffix.
if suffix and string.endswith(suffix):
return string[:-len(suffix)]
else:
return string
def remove_file_extension(filename):
"""
This function returns the filename without its extension.
"""
return os.path.splitext(filename)[0]
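# Quick illustration of the three small string helpers above:
def _example_string_helpers():
    assert remove_prefix('PyQt4.QtCore', 'PyQt4.') == 'QtCore'
    assert remove_suffix('libfoo.so', '.so') == 'libfoo'
    assert remove_file_extension('backend_qt4agg.py') == 'backend_qt4agg'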
def get_module_file_attribute(package):
"""
Given a package name, return the value of its __file__ attribute.
In the PyInstaller process we cannot directly import the analyzed modules.
"""
# Statement to return __file__ attribute of a package.
__file__statement = """
# Fun Python behavior: __import__('mod.submod') returns mod,
# where as __import__('mod.submod', fromlist = [a non-empty list])
# returns mod.submod. See the docs on `__import__
# <http://docs.python.org/library/functions.html#__import__>`_.
# Keyworded arguments in __import__ function are available
# in Python 2.5+. Compatibility with Python 2.4 is preserved.
_fromlist = ['']
_globals = {}
_locals = {}
package = __import__('%s', _globals, _locals, _fromlist)
print package.__file__
"""
return exec_statement(__file__statement % package)
def get_package_paths(package):
"""
Given a package, return the path to packages stored on this machine
and also returns the path to this particular package. For example,
if pkg.subpkg lives in /abs/path/to/python/libs, then this function
returns (/abs/path/to/python/libs,
/abs/path/to/python/libs/pkg/subpkg).
"""
# A package must have a path -- check for this, in case the package
# parameter is actually a module.
is_pkg_statement = 'import %s as p; print hasattr(p, "__path__")'
is_package = eval_statement(is_pkg_statement % package)
assert is_package
file_attr = get_module_file_attribute(package)
# package.__file__ = /abs/path/to/package/subpackage/__init__.py.
# Search for Python files in /abs/path/to/package/subpackage; pkg_dir
# stores this path.
pkg_dir = os.path.dirname(file_attr)
# When found, remove /abs/path/to/ from the filename; mod_base stores
# this path to be removed.
pkg_base = remove_suffix(pkg_dir, package.replace('.', os.sep))
return pkg_base, pkg_dir
# All these extensions represent Python modules or extension modules
PY_EXECUTABLE_EXTENSIONS = set(['.py', '.pyc', '.pyd', '.pyo', '.so'])
def collect_submodules(package):
"""
The following two functions were originally written by Ryan Welsh
(welchr AT umich.edu).
This produces a list of strings which specify all the modules in
package. Its results can be directly assigned to ``hiddenimports``
in a hook script; see, for example, hook-sphinx.py. The
package parameter must be a string which names the package.
This function does not work on zipped Python eggs.
This function is used only for hook scripts, but not by the body of
PyInstaller.
"""
pkg_base, pkg_dir = get_package_paths(package)
# Walk through all file in the given package, looking for submodules.
mods = set()
for dirpath, dirnames, filenames in os.walk(pkg_dir):
# Change from OS separators to a dotted Python module path,
# removing the path up to the package's name. For example,
# '/abs/path/to/desired_package/sub_package' becomes
# 'desired_package.sub_package'
mod_path = remove_prefix(dirpath, pkg_base).replace(os.sep, ".")
# If this subdirectory is a package, add it and all other .py
# files in this subdirectory to the list of modules.
if '__init__.py' in filenames:
mods.add(mod_path)
for f in filenames:
extension = os.path.splitext(f)[1]
if ((remove_file_extension(f) != '__init__') and
extension in PY_EXECUTABLE_EXTENSIONS):
mods.add(mod_path + "." + remove_file_extension(f))
else:
# If not, nothing here is part of the package; don't visit any of
# these subdirs.
del dirnames[:]
return list(mods)
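# Sketch of the usual one-liner in a hook script (the docstring above points
# at hook-sphinx.py; 'sphinx' must be installed for this to return anything):
def _example_collect_submodules():
    return collect_submodules('sphinx')  # e.g. ['sphinx', 'sphinx.builders', ...]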
# These extensions represent Python executables and should therefore be
# ignored.
PY_IGNORE_EXTENSIONS = set(['.py', '.pyc', '.pyd', '.pyo', '.so', '.dylib'])
def collect_data_files(package):
"""
This routine produces a list of (source, dest) non-Python (i.e. data)
files which reside in package. Its results can be directly assigned to
``datas`` in a hook script; see, for example, hook-sphinx.py. The
package parameter must be a string which names the package.
This function does not work on zipped Python eggs.
This function is used only for hook scripts, but not by the body of
PyInstaller.
"""
pkg_base, pkg_dir = get_package_paths(package)
# Walk through all file in the given package, looking for data files.
datas = []
for dirpath, dirnames, files in os.walk(pkg_dir):
for f in files:
extension = os.path.splitext(f)[1]
if not extension in PY_IGNORE_EXTENSIONS:
# Produce the tuple
# (/abs/path/to/source/mod/submod/file.dat,
# mod/submod/file.dat)
source = os.path.join(dirpath, f)
dest = remove_prefix(dirpath,
os.path.dirname(pkg_base) + os.sep)
datas.append((source, dest))
return datas
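# Companion sketch for data files; again the package name is a placeholder
# and must be importable in the spawned interpreter:
def _example_collect_data_files():
    return collect_data_files('sphinx')  # list of (source_path, dest_dir) tuples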
| bsd-2-clause |
jzt5132/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
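# Quick illustration of the normalization (the same dataset names appear in
# the fetch_mldata docstring below):
def _example_mldata_filename():
    # -> ('whistler-daily-snowfall', 'datasets-uci-iris')
    return (mldata_filename('Whistler Daily Snowfall'),
            mldata_filename('datasets-UCI iris'))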
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples` , and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
jongoodnow/anglerfish | src/kinect/app.py | 1 | 5239 | from __future__ import division
from freenect import sync_get_depth as get_depth, sync_get_video as get_video
import cv
import numpy as np
from Tkinter import *
import Image, ImageTk
import time
import urllib2
import matplotlib.pyplot as plt
import sys
from scipy import stats
def save_img():
global rgb
(rgb,_) = get_video()
cv.SaveImage('/tmp/anglerfish-img.jpg', cv.fromarray(np.array(rgb)))
def get_corners():
save_img()
root = Tk()
image = Image.open('/tmp/anglerfish-img.jpg')
size = image.size
canvas = Canvas(root, width=size[0], height=size[1])
canvas.pack()
img = ImageTk.PhotoImage(image)
canvas.create_image(0,0,image=img, anchor="nw")
global board_corners, corners_counted
board_corners = []
corners_counted = [0]
def get_corner_coords(event):
x, y = event.x, event.y
board_corners.append((x, y))
canvas.create_oval(x - 5, y - 5, x + 5, y + 5,
fill='#F00', outline='#F00')
corners_counted[0] += 1
if corners_counted[0] == 4:
root.quit()
canvas.bind("<Button 1>", get_corner_coords)
root.mainloop()
return board_corners
def get_bg_depth():
global corners
corners = get_corners()
mtx = np.matrix(corners)
mean = mtx.mean(0)
middle_coords = (int(mean.item((0, 0))), int(mean.item((0, 1))))
corners_arr = np.array(corners)
global lower_bound, upper_bound
lower_bound = np.amin(corners_arr, axis=0)
upper_bound = np.amax(corners_arr, axis=0)
global depth
(depth,_) = get_depth()
#d3 = np.dstack((depth, depth, depth)).astype(np.uint8)
d3 = np.array(depth)
d3 = np.array([y[lower_bound[0]:upper_bound[0]]
for y in d3[lower_bound[1]:upper_bound[1]]])
mean_vertical = d3.min(axis=0)
return mean_vertical.min(axis=0)
def yposition(x, bg_depth):
d3 = np.array(depth)
column = d3[:, x].astype(np.int8)
columnslice = column[lower_bound[1]:upper_bound[1]]
mode = stats.mode(columnslice)
#plt.clf()
#plt.scatter(np.arange(columnslice.size), columnslice)
return 128
def main(ip_address):
bg_depth_orig = get_bg_depth()
bg_depth_unsigned = bg_depth_orig.astype(np.uint8)
bg_depth = bg_depth_orig.astype(np.int8)
print bg_depth
plt.ion()
plt.show()
corners_arr = np.array(corners)
while True:
(depth,_) = get_depth()
depth_array = np.array(depth).astype(np.int8)
d3 = np.array([y[lower_bound[0]:upper_bound[0]]
for y in depth_array[lower_bound[1]:upper_bound[1]]])
depths_per_x = d3.min(axis=0) - bg_depth
mean_depths_per_x = d3.mean(axis=0) - bg_depth
mean_depths_per_x = np.array([i if i < -3 else 0 for i in mean_depths_per_x])
depths_per_x = np.array([i if i < -3 else 0 for i in depths_per_x])
minimum = np.amin(mean_depths_per_x)
if minimum != 0:
spanleft = None
spanright = None
bodyx = None
for pos, i in enumerate(depths_per_x):
if spanleft is None:
if i < -10:
spanleft = pos + 5
if i < -10:
spanright = pos - 5
for pos, i in enumerate(mean_depths_per_x):
if i == minimum:
bodyx = pos
break
if spanleft is None or spanright is None:
return
bodyy = yposition(bodyx, bg_depth)
bodyz = depth_array[bodyx][bodyy]
lefty = yposition(spanleft, bg_depth)
leftz = depth_array[spanleft][lefty]
righty = yposition(spanright, bg_depth)
rightz = depth_array[spanright][righty]
yrange = upper_bound[1] - lower_bound[1]
xxrange = upper_bound[0] - lower_bound[0]
if spanleft < bodyx - 25:
pointx, pointy, pointz = spanleft, lefty, leftz
urllib2.urlopen(
"http://%s:8888/update?pointer=%f,%f&position=%f,%f,%f" %(ip_address,
pointx / xxrange / 2, pointy / yrange, spanleft/xxrange, bodyx/xxrange,
spanright/xxrange)).read()
elif spanright > bodyx + 25:
pointx, pointy, pointz = spanright, righty, rightz
urllib2.urlopen(
"http://%s:8888/update?pointer=%f,%f&position=%f,%f,%f" %(ip_address,
pointx / xxrange / 2, pointy / yrange, spanleft/xxrange,
bodyx/xxrange, spanright/xxrange)).read()
else:
urllib2.urlopen(
"http://%s:8888/update?pointer=0,0&position=%f,%f,%f" %(ip_address,
spanleft/xxrange, bodyx/xxrange, spanright/xxrange)).read()
else:
urllib2.urlopen(
"http://%s:8888/update?pointer=0,0" %ip_address).read()
plt.clf()
plt.scatter(np.arange(depths_per_x.size), depths_per_x)
plt.ylim([-255, 255])
plt.draw()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Please provide the IP address of the app server as an argument."
else:
main(sys.argv[1]) | mit |
h0nzZik/IA158-patrol-robot | modules/btstack/platforms/mtk/docs/scripts/plot_scan_two_groups.py | 3 | 8983 | #!/usr/bin/env python
import matplotlib.pyplot as plt
#from pylab import *
import cPickle
import pylab as P
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.patches import Polygon
import itertools
import os
def histplot(data,labels, colors, x_label, y_label, title, fig_name, cdf):
fig, ax = plt.subplots()
if cdf:
n, bins, patches = ax.hist(data, 20, weights=None, histtype='step', normed=True, cumulative=True, label= labels, color = colors)
legend = ax.legend(loc='lower left', shadow=False)
ax.grid(True)
else:
n, bins, patches = ax.hist( data, 20, weights=None, histtype='bar', label= labels, color = colors)
legend = ax.legend(loc='upper right', shadow=False)
for line in ax.get_lines():
line.set_linewidth(1.5)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
for label in legend.get_texts():
label.set_fontsize('small')
for label in legend.get_lines():
label.set_linewidth(1.5) # the legend line width
fig.suptitle(title, fontsize=12)
#plt.show()
pp = PdfPages(fig_name)
pp.savefig(fig)
pp.close()
return [n, bins, patches]
def accplot(data, labels, colors, x_label, y_label, title, fig_name, annotation):
mean = np.zeros(len(data))
for i in range(len(data)):
if len(data[i]) > 0:
mean[i] = len(data[i]) /(1.0*max(data[i]))
mean = round(mean)
fig, ax = plt.subplots()
for i in range(len(data)):
if len(data[i]) > 0:
ax.plot(data[i], range(len(data[i])), colors[i], label= labels[i]+', '+mean[i]+' adv/s, total nr. '+str(len(data[i])))
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
for tl in ax.get_yticklabels():
tl.set_color('k')
legend = ax.legend(loc='upper left', shadow=False)
for label in legend.get_texts():
label.set_fontsize('small')
for label in legend.get_lines():
label.set_linewidth(1.5) # the legend line width
for line in ax.get_lines():
line.set_linewidth(1.5)
fig.suptitle(title, fontsize=12)
ax.text(400, 5000, annotation , style='italic',
bbox={'facecolor':'gray', 'alpha':0.5, 'pad':10})
#plt.show()
pp = PdfPages(fig_name)
pp.savefig(fig)
pp.close()
return fig
def mean_common_len(data):
mcl = 0
for i in range(len(data) - 1):
if len(data[i]) > 0:
if mcl == 0:
mcl = len(data[i])
else:
mcl = min(mcl, len(data[i]))
return mcl
def mean_common_time(data):
mct = 0
for i in range(len(data) - 1):
if len(data[i]) > 0:
if mct == 0:
mct = max(data[i])
else:
mct = min(mct, max(data[i]))
return mct
def normalize(s):
return map(lambda x: (x - s[0]), s)
def delta(s):
rs = list()
for i in range(len(s)-1):
rs.append(s[i+1] - s[i])
return rs
def round(s):
return map(lambda x: "{0:.4f}".format(x), s)
def cut(s, V):
r = list()
for i in range(len(s)):
if s[i] <= V:
r.append(s[i])
return r
def prepare_data(exp_name, sensor_name):
prefix = '../data/processed/'
scanning_type = exp_name+'_continuous'
mn = cPickle.load(open(prefix+scanning_type+'_mac_'+sensor_name+'.data', 'rb')) # mac nio,
mm = cPickle.load(open(prefix+scanning_type+'_mac_mac.data', 'rb')) # mac mac,
rn = cPickle.load(open(prefix+scanning_type+'_rug_'+sensor_name+'.data', 'rb')) # ruggear nio,
rm = cPickle.load(open(prefix+scanning_type+'_rug_mac.data', 'rb')) # ruggear mac,
scanning_type = exp_name+'_normal'
try:
normal_rn = cPickle.load(open(prefix + scanning_type+'_rug_'+sensor_name+'.data', 'rb')) # ruggear mac, normal
except:
normal_rn = list()
try:
normal_mn = cPickle.load(open(prefix + scanning_type+'_mac_'+sensor_name+'.data', 'rb')) # ruggear mac, normal
except:
normal_mn = list()
try:
normal_rm = cPickle.load(open(prefix + scanning_type+'_rug_mac.data', 'rb')) # ruggear mac, normal
except:
normal_rm = list()
try:
normal_mm = cPickle.load(open(prefix + scanning_type+'_mac_mac.data', 'rb')) # ruggear mac, normal
except:
normal_mm = list()
T = mean_common_time([mm, mn, rm, rn, normal_rm, normal_rn, normal_mm, normal_mn])
L = mean_common_len([mm, mn, rm, rn, normal_rm, normal_rn, normal_mm, normal_mn])
Z = 15
print "mct %d, mcl %d" % (T,L)
mac_mac = normalize(mm)
mac_nio = normalize(mn)
ruggeer_mac = normalize(rm)
ruggeer_nio = normalize(rn)
ruggeer_nio_normal = normalize(normal_rn)
ruggeer_mac_normal = normalize(normal_rm)
mac_mac_normal = normalize(normal_mm)
mac_nio_normal = normalize(normal_mn)
delta_mn = delta(mac_nio)
delta_mm = delta(mac_mac)
delta_rn = delta(ruggeer_nio)
delta_rm = delta(ruggeer_mac)
rn_delays = list()
for i in range(len(delta_rn)):
rn_delays.append(range(delta_rn[i]))
flattened_rn_delays = list(itertools.chain.from_iterable(rn_delays))
plot_data = [cut(mac_mac,T), cut(mac_nio,T), cut(ruggeer_mac,T), cut(ruggeer_nio,T)]
plot_data_normal = [cut(mac_mac_normal,T), cut(mac_nio_normal,T), cut(ruggeer_mac_normal,T), cut(ruggeer_nio_normal,T)]
hist_data = [delta_mm[0:L], delta_mn[0:L], delta_rm[0:L], delta_rn[0:L]]
zoomed_hist_data = list()
if len(hist_data[0]) >= Z and len(hist_data[1]) >= Z and len(hist_data[2]) >= Z and len(hist_data[3]) >= Z :
zoomed_hist_data = [cut(hist_data[0],Z), cut(hist_data[1],Z), cut(hist_data[2],Z), cut(hist_data[3],Z)]
return [plot_data, hist_data, zoomed_hist_data, flattened_rn_delays, plot_data_normal]
def plot(exp_name, sensor_name, sensor_title, prefix):
[plot_data0, hist_data0, zoomed_hist_data0, rn_delays0, plot_data_normal0] = prepare_data(exp_name, sensor_name)
labels0 = ['Scan. BCM, Adv. BCM', 'Scan. BCM, Adv. '+ sensor_title, 'Scan. RugGear, Adv. BCM', 'Scan. RugGear, Adv. '+sensor_title]
plot_colors0 = ['r-','k-','b-','g-']
hist_colors0 = ['red','black','blue','green']
group_index1 = 2;
group_index2 = 3;
plot_data = [plot_data0[group_index1], plot_data0[group_index2]]
hist_data = [hist_data0[group_index1], hist_data0[group_index2]]
zoomed_hist_data = [zoomed_hist_data0[group_index1], zoomed_hist_data0[group_index2]]
rn_delays = [rn_delays0[group_index1], rn_delays0[group_index2]]
plot_data_normal = [plot_data_normal0[group_index1], plot_data_normal0[group_index2]]
labels = [labels0[group_index1], labels0[group_index2]]
plot_colors = [plot_colors0[group_index1], plot_colors0[group_index2]]
hist_colors = [hist_colors0[group_index1], hist_colors0[group_index2]]
title = 'Continuous scanning over time'
annotation = 'scan window 30ms, scan interval 30ms'
x_label = 'Time [s]'
y_label = 'Number of advertisements'
accplot(plot_data, labels, plot_colors, x_label, y_label, title, prefix+sensor_name+'_acc_number_of_advertisements_continuous_scanning.pdf', annotation)
x_label = 'Time interval between two advertisements [s]'
title = 'Continuous scanning - interval distribution'
histplot(hist_data, labels, hist_colors, x_label, y_label, title, prefix+sensor_name+'_histogram_advertisements_time_delay.pdf', 0)
#if len(zoomed_hist_data) > 0:
# title = 'Continuous scanning - interval distribution [0-15s]'
# histplot(zoomed_hist_data, labels, hist_colors, x_label, y_label, title, prefix+sensor_name+'_histogram_advertisements_time_delay_zoomed.pdf', 0)
# title = 'Continuous scanning - expected waiting time'
# x_label = 'Expected waiting time until first scan [s]'
# [n, bins, patches] = histplot([rn_delays], [labels0[3]], [hist_colors0[3]], x_label, y_label, title, prefix+sensor_name+'_ruggear_expected_scan_response.pdf', 0)
# title = 'Continuous scanning - expected waiting time probability distribution'
# y_label = 'Advertisement probability'
# x_label = 'Time until first scan [s]'
# [n, bins, patches] = histplot([rn_delays], [labels0[3]], [hist_colors0[3]], x_label, y_label, title, prefix+sensor_name+'_ruggear_cdf.pdf', 1)
title = 'Normal scanning over time'
annotation = 'scan window 30ms, scan interval 300ms'
x_label = 'Time [s]'
y_label = 'Number of advertisements'
if len(plot_data_normal[0]) > 0:
accplot(plot_data_normal, labels, plot_colors, x_label, y_label, title, prefix+sensor_name+'_acc_number_of_advertisements_normal_scanning.pdf', annotation)
picts_folder = "../picts_experiments/"
if not os.access(picts_folder, os.F_OK):
os.mkdir(picts_folder)
#plot('exp1','nio', 'Nio')
plot('exp2','xg1', 'XG', picts_folder)
plot('exp2','xg2', 'XG', picts_folder)
| gpl-2.0 |
ralbayaty/KaggleRetina | testing/makeCENSUREfeatures.py | 1 | 2325 | from skimage import data
from skimage import transform as tf
from skimage.feature import CENSURE
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
from skimage.feature import (match_descriptors, corner_harris,
corner_peaks, ORB, plot_matches)
import cv2
import sys
import os
import numpy as np
import cPickle as pickle
def pickle_keypoints(keypoints, descriptors):
i = 0
temp_array = []
for point in keypoints:
temp = (point, descriptors[i])
++i
temp_array.append(temp)
return temp_array
# Need to figure out how to process non-cv2 kp and des
def unpickle_keypoints(array):
keypoints = []
descriptors = []
for point in array:
temp_feature = cv2.KeyPoint(x=point[0][0],y=point[0][1],_size=point[1],
_angle=point[2], _response=point[3], _octave=point[4], _class_id=point[5])
temp_descriptor = point[6]
keypoints.append(temp_feature)
descriptors.append(temp_descriptor)
return keypoints, np.array(descriptors)
#########
try:
folder1 = sys.argv[1]
if folder1 not in ["sample", "train", "test"]:
print("The folder name provided wasn't: sample, train, or test; using sample folder.")
folder1 = "sample"
except:
print("Didn't give me a folder; using sample folder.")
folder1 = "sample"
file_names = os.listdir("/home/dick/Documents/Kaggle/" + folder1)
N = len(file_names)
print("Progress: 0 %"),
for i in range(N):
img = cv2.imread(folder1 + "/" + file_names[i],1)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
censure = CENSURE()
censure.detect(gray)
kp = censure.keypoints
scales = censure.scales
print(len(kp))
plt.imshow(img)
plt.axis('off')
plt.scatter(censure.keypoints[:, 1], censure.keypoints[:, 0],
2 ** censure.scales, facecolors='none', edgecolors='r')
plt.show()
#Store and Retrieve keypoint features
temp_array = []
temp = pickle_keypoints(kp, scales)
temp_array.append(temp)
pickle.dump(temp_array, open("features/" + folder1 + "/censure/gray/" + file_names[i][:-5] + "_censure.pkl", "wb"))
temp = str(float((i+1)*100/N))
print("\rProgress: " + temp + " %"), | gpl-2.0 |
YeEmrick/learning | cs231/assignment/assignment2/experiments/ThreeLayerConvnet/conf_init_maker.py | 1 | 1536 | import os
import sys
from sklearn.externals import joblib
import json
import numpy as np
DIR_CS231n = '/Users/cthorey/Documents/MLearning/CS231/assignment2/'
conf = {}
# Model instance
conf['input_dim'] = (3, 32, 32)
conf['num_filters'] = 64
conf['filter_size'] = 3
conf['hidden_dim'] = 500
conf['num_classes'] = 10
conf['weight_scale'] = 1e-3
conf['use_batchnorm'] = True
# Solver instance
conf['update_rule'] = 'adam'
conf['lr_decay'] = 0.99
conf['batch_size'] = 50
conf['num_epochs'] = 1
conf['print_every'] = 10
conf['verbose'] = True
# Helper function
def name_model(path):
''' Given a directory where you want to run a new model,
automatically select the name of the new model by incrementing
by 1 the largest model number already present in that directory.'''
existing_models = [f for f in os.listdir(
path) if f.split('_')[0] == 'model']
if len(existing_models) == 0:
model = -1
else:
model = max([int(f.split('_')[1]) for f in existing_models])
return os.path.join(path, 'model_' + str(model + 1))
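# Small sanity check of the naming scheme above, run against a throwaway
# temporary directory rather than DIR_CS231n:
def _example_name_model():
    import tempfile
    d = tempfile.mkdtemp()
    os.mkdir(os.path.join(d, 'model_0'))
    os.mkdir(os.path.join(d, 'model_3'))
    return name_model(d)  # ends with 'model_4'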
name = os.listdir(DIR_CS231n)
dir_json = name_model(os.path.join(
DIR_CS231n, 'experiments', 'ThreeLayerConvnet'))
conf['path'] = dir_json
try:
'Initialize the model tree'
os.mkdir(dir_json)
except:
raise ValueError(
'Cannot create the directory for the model %s' % (dir_json))
with open(os.path.join(dir_json, 'conf_init.json'), 'w+') as f:
json.dump(conf,
f,
sort_keys=True,
indent=4,
ensure_ascii=False)
| apache-2.0 |
milankl/swm | plot/budget_closure.py | 1 | 7718 | from __future__ import print_function
path = '/network/aopp/cirrus/pred/kloewer/swm_bf_cntrl/data/'
dpath = '/network/aopp/cirrus/pred/kloewer/swm_back_ronew/'
outpath = '/network/home/aopp/kloewer/swm/paperplot/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
import matplotlib.pyplot as plt
import time as tictoc
from netCDF4 import Dataset
import glob
from matplotlib.colors import BoundaryNorm,LogNorm
import cmocean
plt.rcParams['mathtext.fontset'] = 'cm'
plt.rcParams['mathtext.rm'] = 'serif'
# functions
def h2mat(h,param):
return h.reshape((param['ny'],param['nx']))
def u2mat(u,param):
return u.reshape((param['ny'],param['nx']-1))
def v2mat(v,param):
return v.reshape((param['ny']-1,param['nx']))
def q2mat(q,param):
return q.reshape((param['ny']+1,param['nx']+1))
# OPTIONS
runfolders_cntrl = [12,10,0,11,6,9]
runfolders_back = [0,1,2,3,6,7,8,4]
runfolders_start = np.array([1,0,5,2,5,0])*365 # start year for computations of energy budget
budget_terms_cntrl = np.zeros((len(runfolders_cntrl),4)) # four terms for cntrl: dE/dt, wind, bottom friction, viscosity
budget_terms_back = np.zeros((len(runfolders_back),5)) # five terms for back: dE/dt, wind, bottom friction, viscosity, backscatter
res_cntrl = np.zeros(len(runfolders_cntrl))
dx_cntrl = np.zeros(len(runfolders_cntrl))
rdiss = np.zeros(len(runfolders_back))
# CONTROL
for i,r in enumerate(runfolders_cntrl):
runpath = path+'run%04i' % r
D = np.load(runpath+'/analysis/power_map.npy').all()
E = np.load(runpath+'/analysis/mean_timeseries.npy').all()
param = np.load(runpath+'/param.npy').all()
Estart = (E['KEm']+E['PEm'])[runfolders_start[i]]
Eend = (E['KEm']+E['PEm'])[-1]
dt = 4*param['output_dt']*(len(E['KEm'][runfolders_start[i]:])-1)
budget_terms_cntrl[i,0] = (Eend - Estart)/dt
budget_terms_cntrl[i,1] = D['InPower_T'].mean()
budget_terms_cntrl[i,2] = D['BfricPower_T'].mean()
budget_terms_cntrl[i,3] = D['ExPower_T'].mean()
res_cntrl[i] = param['nx']
dx_cntrl[i] = param['dx']
# BACKSCATTER
for i,r in enumerate(runfolders_back):
runpath = dpath+'run%04i' % r # use different path
D = np.load(runpath+'/analysis/power_map.npy').all()
E = np.load(runpath+'/analysis/mean_timeseries.npy').all()
param = np.load(runpath+'/param.npy').all()
Estart = (E['KEm']+E['PEm'])[5*365] # for backscatter runs discard first 5 years
Eend = (E['KEm']+E['PEm'])[-1]
dt = 4*param['output_dt']*(len(E['KEm'][5*365:])-1)
budget_terms_back[i,0] = (Eend - Estart)/dt
budget_terms_back[i,1] = D['InPower_T'].mean()
budget_terms_back[i,2] = D['BfricPower_T'].mean()
budget_terms_back[i,3] = D['ExPower_T'].mean()
budget_terms_back[i,4] = D['BackPower_T'].mean()
rdiss[i] = param['n_diss']
closure_cntrl = budget_terms_cntrl.sum(axis=1)
closure_back = budget_terms_back.sum(axis=1)
closure_norm_cntrl = np.sqrt((budget_terms_cntrl**2).sum(axis=1))
closure_norm_back = np.sqrt((budget_terms_back**2).sum(axis=1))
# treat viscosity and backscatter as one
budget_terms_back[:,3] = budget_terms_back[:,3:].sum(axis=1)
budget_terms_back[:,4] = 0
dissipation = budget_terms_cntrl[:,2:].sum(axis=1)
## PLOTTING 1 versus backscatter strength Rdiss
s = 1e3
p = 0.001
norm = (closure_norm_back.mean() + closure_norm_cntrl.mean())/2*s*p
fig,(ax1,ax) = plt.subplots(2,1,sharex=True)
for i in range(len(runfolders_cntrl)):
ax1.plot(0,budget_terms_cntrl[i,0]*s,"C"+str(i)+"x",alpha=.6)
ax1.plot(0,budget_terms_cntrl[i,1]*s,"C"+str(i)+"s",alpha=.6)
ax1.plot(0,budget_terms_cntrl[i,2]*s,"C"+str(i)+"^",alpha=.6)
ax1.plot(0,budget_terms_cntrl[i,3]*s,"C"+str(i)+"o",alpha=.6)
ax1.plot(-10,0,"x",color="grey",label="dE/dt")
ax1.plot(-10,0,"s",color="grey",label="wind stress")
ax1.plot(-10,0,"^",color="grey",label="bottom friction")
ax1.plot(-10,0,"o",color="grey",label="viscosity + backscatter")
ax1.legend(loc=3,ncol=4,fontsize=6)
for i in range(len(runfolders_back)):
ax1.plot(i+1,budget_terms_back[i,0]*s,"kx",alpha=.7)
ax1.plot(i+1,budget_terms_back[i,1]*s,"ks",alpha=.7)
ax1.plot(i+1,budget_terms_back[i,2]*s,"k^",alpha=.7)
ax1.plot(i+1,budget_terms_back[i,3]*s,"ko",alpha=.7)
ax.plot([-1,10],[0,0],"grey",lw=0.2)
ax1.plot([-1,10],[0,0],"grey",lw=0.2)
lines = [0]*len(closure_cntrl)
for i,cc in enumerate(closure_cntrl):
lines[i], = ax.plot(0,cc*s,"C"+str(i)+"+",ms=3,label=r"$\Delta x$ = {:.2f}km".format(dx_cntrl[i]/1e3))
first_legend = plt.legend(loc=3,handles=lines,title="Control runs",fontsize=6)
fl = plt.gca().add_artist(first_legend)
linef = ax.fill_between([-1,10],[-norm,-norm],[norm,norm],alpha=.2,label="budget unclosed by <0.1%")
line, = ax.plot(1+np.arange(len(rdiss)),closure_back*s,"k*",label=r"$\Delta x$ = 30km",alpha=.7)
plt.legend(loc=8,handles=[line,linef],title="Backscatter runs",fontsize=6)
ax1.set_ylim(-10,10)
ax.set_xlim(-1,len(rdiss)+1)
ax.set_xticks(np.arange(len(rdiss)+1))
ax.set_xticklabels([0,1,2,6,8,16,32,64,r"$\infty$"])
ax.set_xlabel(r"Backscatter strength $R_{diss}$")
ax.set_ylabel(r"[mW m$^{-2}$]")
ax1.set_ylabel(r"[mW m$^{-2}$]")
ax1.set_title("Energy budget")
ax.set_title("Budget closure")
ax1.set_title("a",loc="left",fontweight="bold")
ax.set_title("b",loc="left",fontweight="bold")
plt.tight_layout()
plt.savefig(outpath+'plots/budget_closure.pdf')
plt.close(fig)
## PLOTTING 2 - versus RESOLUTION
fig,(ax,ax1) = plt.subplots(2,1)
for i in range(len(runfolders_cntrl)):
ax.plot(res_cntrl[i],budget_terms_cntrl[i,0]*s,"C"+str(i)+"x",alpha=.6)
ax.plot(res_cntrl[i],budget_terms_cntrl[i,1]*s,"C"+str(i)+"s",alpha=.6)
ax.plot(res_cntrl[i],budget_terms_cntrl[i,2]*s,"C"+str(i)+"^",alpha=.6)
ax.plot(res_cntrl[i],budget_terms_cntrl[i,3]*s,"C"+str(i)+"o",alpha=.6)
ax.plot(-10,0,"x",color="grey",label="dE/dt")
ax.plot(-10,0,"s",color="grey",label="wind stress")
ax.plot(-10,0,"^",color="grey",label="bottom friction")
ax.plot(-10,0,"o",color="grey",label="viscosity")
for i in range(len(runfolders_cntrl)):
ax1.plot(res_cntrl[i],budget_terms_cntrl[i,2]/dissipation[i]*100,"C"+str(i)+"^",alpha=.6)
ax1.plot(res_cntrl[i],budget_terms_cntrl[i,3]/dissipation[i]*100,"C"+str(i)+"o",alpha=.6)
ax.plot(res_cntrl,budget_terms_cntrl[:,1]*s,"k",zorder=1,lw=.8)
ax.plot(res_cntrl,budget_terms_cntrl[:,2]*s,"k",zorder=1,lw=.8)
ax.plot(res_cntrl,budget_terms_cntrl[:,3]*s,"k",zorder=1,lw=.8)
ax1.plot(res_cntrl,budget_terms_cntrl[:,2]/dissipation*100,"k",zorder=1,lw=1)
ax1.plot(res_cntrl,budget_terms_cntrl[:,3]/dissipation*100,"k",zorder=1,lw=1)
ax.legend(loc=4,ncol=4,fontsize=6)
ax.plot([-1,1300],[0,0],"--",lw=1,color="grey")
ax1.plot([-1,1300],[0,0],"--",lw=1,color="grey")
ax1.plot([-1,1300],[100,100],"--",lw=1,color="grey")
ax1.plot(-100,0,"^",color="grey",label="bottom friction")
ax1.plot(-100,0,"o",color="grey",label="viscosity")
ax1.legend(loc=7)
ax.set_ylim(-10,10)
ax1.set_ylim(-5,105)
ax.set_xlim(-1,1050)
ax1.set_xlim(-1,1050)
ax1.set_xticks(res_cntrl)
ax.set_xticks(res_cntrl)
ax.set_xticklabels([120,60,30,15,7.5,3.75],rotation=90,fontsize=8)
ax1.set_xticklabels([r"32$^2$",r"64$^2$",r"128$^2$",r"256$^2$",r"512$^2$",r"1024$^2$"],fontsize=8)
ax.set_xlabel(r"Resolution $\Delta x$ [km]",fontsize=8)
ax1.set_xlabel(r"Grid cells $N^2$",fontsize=8)
ax.set_ylabel(r"[mW m$^{-2}$]")
ax1.set_ylabel("[%]")
ax.set_title("Energy budget")
ax1.set_title("Dissipation: Bottom friction vs Viscosity")
ax.set_title("a",loc="left",fontweight="bold")
ax1.set_title("b",loc="left",fontweight="bold")
plt.tight_layout()
plt.savefig(outpath+'plots/energy_budget_terms.png',dpi=200)
plt.close(fig) | gpl-3.0 |
anurag313/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each
point is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect is often
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
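# A minimal sketch of the C-rescaling described above (the data and tolerance
# are made-up illustration values): weighting every sample by 2 with C=1
# should behave like fitting with C=2 and unit weights, so the two decision
# functions should agree up to numerical precision.
def _weight_rescaling_sketch():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(40, 2)
    y_demo = (X_demo[:, 0] > 0).astype(int)
    clf_a = svm.SVC(C=1.0).fit(X_demo, y_demo,
                               sample_weight=2 * np.ones(len(X_demo)))
    clf_b = svm.SVC(C=2.0).fit(X_demo, y_demo)
    return np.allclose(clf_a.decision_function(X_demo),
                       clf_b.decision_function(X_demo), atol=1e-6)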
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# fit the model twice: with sample weights and, for reference, without
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
dmartinalbo/image-matching | image-matching.py | 1 | 8094 | from __future__ import division
import sys
import os
import argparse
import logging
import numpy as np
import cv2
from matplotlib import pyplot as plt
# load global logger
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s %(message)s')
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
def calculate_SIFT(img):
# Find the keypoints and descriptors using SIFT features
kp, des = sift.detectAndCompute(img,None)
return kp, des
def knn_match(des1, des2, nn_ratio=0.7):
# FLANN parameters
index_params = dict(algorithm = 0, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
# Match features from each image
matches = flann.knnMatch(des1, des2, k=2)
# store only the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
if m.distance < nn_ratio * n.distance:
good.append(m)
return good
# calculate the angle with the horizontal
def angle_horizontal(v):
return -np.arctan2(v[1],v[0])
def knn_clasif(good_matches):
best_template, highest_logprob = None, 0.0
sum_good_matches = sum([len(gm) for gm in good_matches])
for i, gm in enumerate(good_matches):
logprob = len(gm)/sum_good_matches
# save highest
if logprob > highest_logprob:
highest_logprob = logprob
best_template = i
logger.info('p(t_{} | x) = {:.4f}'.format(i, logprob))
return best_template
def main(argv=None):
try:
parser = argparse.ArgumentParser(description='Image Classification and Matching Using Local Features and Homography.')
parser.add_argument('-t', dest='template_names', nargs='+', required=True, help='List of template images')
parser.add_argument('-q', dest='query_names', nargs='+', required=True, help='List of query images')
parser.add_argument('-o', dest='output_path', help='Output directory', default='.')
parser.add_argument('-c', dest='bounding_boxes', nargs='*', help='Bounding boxes to crop. ("WxH+X+Y")')
parser.add_argument('-v', dest='verbosity', action='store_true', help='Increase output verbosity')
        parser.add_argument('-p', dest='photocopied', action='store_true', help='Use only if the image is scanned or photocopied; do not use with photos!')
parser.add_argument('--matches', dest='view_matches', action='store_true', help="Shows the matching result and the good matches")
parser.set_defaults(view_matches=False)
parser.set_defaults(photocopied=False)
args = parser.parse_args()
    except Exception as e:
logger.error('Error', exc_info=True)
return 2
# logging stuff
if args.verbosity:
logger.setLevel(logging.DEBUG)
# load template images
templates = []
for name in args.template_names:
logger.info('Loading template image {}'.format(name))
img = cv2.imread(name, cv2.IMREAD_GRAYSCALE)
logger.info(' Calculating SIFT features ...')
kp, des = calculate_SIFT(img)
templates.append( [name, img, kp, des])
# load query
for name in args.query_names:
logger.info('Loading query image {}'.format(name))
img = cv2.imread(name, cv2.IMREAD_GRAYSCALE)
        logger.info('  Calculating SIFT features ...')
query_kp, query_des = calculate_SIFT(img)
# for each template, calculate the best match
list_good_matches = []
for templ_name, _, _, templ_des in templates:
logger.info('Estimating match between {} and {}'.format(templ_name, name))
gm = knn_match(templ_des, query_des)
list_good_matches.append(gm)
# Get closer template using k-nn
best_template = knn_clasif(list_good_matches)
        # Keep the best result
template_kp = templates[best_template][2]
good_matches = list_good_matches[best_template]
# data massaging
src_pts = np.float32([ template_kp[m.queryIdx].pt for m in good_matches ]).reshape(-1,1,2)
dst_pts = np.float32([ query_kp[m.trainIdx].pt for m in good_matches ]).reshape(-1,1,2)
logger.info('Estimating homography between {} and {}'.format(templates[best_template][0], name))
# find the matrix transformation M
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 6.0)
matchesMask = mask.ravel().tolist()
# Make it affine
M[2,2] = 1.0
M[2,0] = 0.0
M[2,1] = 0.0
# Calculate the rectangle enclosing the query image
h,w = templates[best_template][1].shape
# Define the rectangle in the coordinates of the template image
pts = np.float32([[0,0],[0,h-1],[w-1,h-1],[w-1,0]]).reshape(-1,1,2)
# transform the rectangle from the template "coordinates" to the query "coordinates"
dst = cv2.perspectiveTransform(pts,M)
if args.photocopied:
            logger.info('Simplifying transformation matrix ...')
            # if the image is scanned or photocopied we can assume that there is no shear in x or y,
            # so we can simplify the transformation matrix M to only rotation, scale and translation.
# calculate template "world" reference vectors
w_v = np.array([w-1,0])
h_v = np.array([h-1,0])
# calculate query "world" reference vectors
w_vp = (dst[3]-dst[0])[0]
h_vp = (dst[1]-dst[0])[0]
            # The rotation angle and scale cannot be read off M directly because scaling shares its entries with the shear terms,
# see https://upload.wikimedia.org/wikipedia/commons/2/2c/2D_affine_transformation_matrix.svg
# estimate the angle using the top-horizontal line
angle = angle_horizontal(w_vp)
# estimate the scale using the top-horizontal line and left-vertical line
scale_x = np.linalg.norm(w_vp) / np.linalg.norm(w_v)
scale_y = np.linalg.norm(h_vp) / np.linalg.norm(h_v)
# retrieve translation from original matrix M
M = np.matrix([[ scale_x * np.cos(angle) , np.sin(angle) , M[0,2] ],
[ -np.sin(angle) , scale_y * np.cos(angle) , M[1,2] ],
[ 0 , 0 , 1. ]])
# retransform the rectangle with the new matrix
dst = cv2.perspectiveTransform(pts,M)
# if bounding boxes are provided and we only have one template
# crop those bounding boxes
bn, ext = os.path.splitext(os.path.basename(name))
# using M^{-1} we go from query coordinates to template coordinates.
img_templ_coords = cv2.warpPerspective(img, np.linalg.inv(M), (w,h))
if len(args.template_names) == 1 and args.bounding_boxes:
cont_bbs = 0
for bb in args.bounding_boxes:
# parse bb string to variables
width, height, x_ini, y_ini = (int(c) for c in bb.replace('x','+').split('+'))
#
logger.info('Cropping "{}x{}+{}+{}" from {}'.format(width, height, x_ini, y_ini, name))
# crop image
img_templ_coords_crop = img_templ_coords[y_ini:y_ini+height, x_ini:x_ini+width]
# write it
cv2.imwrite('{}/{}_crop_{}{}'.format(args.output_path, bn, cont_bbs, ext), img_templ_coords_crop)
logger.info(' Saved in {}/{}_crop_{}{}'.format(args.output_path, bn, cont_bbs, ext))
cont_bbs += 1
else:
logger.info('No crop. Saving full image in {}/{}_fix{}'.format(args.output_path, bn, ext))
cv2.imwrite('{}/{}_fix{}'.format(args.output_path, bn, ext), img_templ_coords)
if args.view_matches:
# draw the rectangle in the image
out = cv2.polylines(img,[np.int32(dst)],True,0,2, cv2.LINE_AA)
# show the matching features
params = dict(matchColor = (0,255,0), # draw matches in green color
singlePointColor = None,
matchesMask = matchesMask, # draw only inliers
flags = 2)
## draw the matches image
out = cv2.drawMatches(templates[best_template][1], template_kp,
img, query_kp,
good_matches,
None, **params)
## show result
plt.imshow(out, 'gray')
plt.show()
if __name__ == "__main__":
sys.exit(main())
| mit |
Jimmy-Morzaria/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
    # n_clusters * n_samples_per_cluster = 30
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
    # Test that the radius of each leaf subcluster is at most the threshold
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
jakereps/q2-diversity | q2_diversity/_beta/_beta_rarefaction.py | 2 | 7065 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import pkg_resources
import os.path
import functools
import qiime2
import biom
import skbio
import seaborn as sns
import scipy
from emperor import Emperor
import q2templates
from . import METRICS
from .._ordination import pcoa
TEMPLATES = pkg_resources.resource_filename('q2_diversity', '_beta')
def beta_rarefaction(output_dir: str, table: biom.Table, metric: str,
clustering_method: str, metadata: qiime2.Metadata,
sampling_depth: int, iterations: int = 10,
phylogeny: skbio.TreeNode = None,
correlation_method: str = 'spearman',
color_scheme: str = 'BrBG') -> None:
with qiime2.sdk.Context() as scope:
if table.is_empty():
raise ValueError("Input feature table is empty.")
# Filter metadata to only include sample IDs present in the feature
# table. Also ensures every feature table sample ID is present in the
# metadata.
metadata = metadata.filter_ids(table.ids(axis='sample'))
table = qiime2.Artifact.import_data('FeatureTable[Frequency]', table)
if metric in METRICS['PHYLO']['IMPL'] | METRICS['PHYLO']['UNIMPL']:
if phylogeny is None:
raise ValueError("A phylogenetic metric (%s) was requested, "
"but a phylogenetic tree was not provided. "
"Phylogeny must be provided when using a "
"phylogenetic diversity metric." % metric)
phylogeny = qiime2.Artifact.import_data('Phylogeny[Rooted]',
phylogeny)
api_method = scope.ctx.get_action('diversity', 'beta_phylogenetic')
beta_func = functools.partial(api_method, phylogeny=phylogeny)
else:
beta_func = scope.ctx.get_action('diversity', 'beta')
rare_func = scope.ctx.get_action('feature-table', 'rarefy')
distance_matrices = _get_multiple_rarefaction(
beta_func, rare_func, metric, iterations, table, sampling_depth)
primary = distance_matrices[0]
support = distance_matrices[1:]
heatmap_fig, similarity_df = _make_heatmap(
distance_matrices, metric, correlation_method, color_scheme)
heatmap_fig.savefig(os.path.join(output_dir, 'heatmap.svg'))
similarity_df.to_csv(
os.path.join(output_dir, 'rarefaction-iteration-correlation.tsv'),
sep='\t')
tree = _cluster_samples(primary, support, clustering_method)
tree.write(os.path.join(output_dir,
'sample-clustering-%s.tre' % clustering_method))
emperor = _jackknifed_emperor(primary, support, metadata)
emperor_dir = os.path.join(output_dir, 'emperor')
emperor.copy_support_files(emperor_dir)
with open(os.path.join(emperor_dir, 'index.html'), 'w') as fh:
fh.write(emperor.make_emperor(standalone=True))
templates = list(map(
lambda page: os.path.join(TEMPLATES, 'beta_rarefaction_assets', page),
['index.html', 'heatmap.html', 'tree.html', 'emperor.html']))
context = {
'metric': metric,
'clustering_method': clustering_method,
'tabs': [{'url': 'emperor.html',
'title': 'PCoA'},
{'url': 'heatmap.html',
'title': 'Heatmap'},
{'url': 'tree.html',
'title': 'Clustering'}]
}
q2templates.render(templates, output_dir, context=context)
def _get_multiple_rarefaction(beta_func, rare_func, metric, iterations, table,
sampling_depth):
distance_matrices = []
for _ in range(iterations):
rarefied_table, = rare_func(table=table, sampling_depth=sampling_depth)
distance_matrix, = beta_func(table=rarefied_table, metric=metric)
distance_matrices.append(distance_matrix.view(skbio.DistanceMatrix))
return distance_matrices
def _make_heatmap(distance_matrices, metric, correlation_method, color_scheme):
test_statistics = {'spearman': "Spearman's rho", 'pearson': "Pearson's r"}
sm_df = skbio.stats.distance.pwmantel(
distance_matrices, method=correlation_method, permutations=0,
strict=True)
sm = sm_df[['statistic']] # Drop all other DF columns
sm = sm.unstack(level=0) # Reshape for seaborn
ax = sns.heatmap(
sm, cmap=color_scheme, vmin=-1.0, vmax=1.0, center=0.0, annot=False,
square=True, xticklabels=False, yticklabels=False,
cbar_kws={'ticks': [1, 0.5, 0, -0.5, -1],
'label': test_statistics[correlation_method]})
ax.set(xlabel='Iteration', ylabel='Iteration',
title='%s - Mantel correlation between iterations' % metric)
return ax.get_figure(), sm_df
def _cluster_samples(primary, support, clustering_method):
cluster = {'nj': _nj, 'upgma': _upgma}[clustering_method]
primary = cluster(primary)
primary_internal_nodes = list(primary.non_tips())
support_total = len(support)
for n in primary_internal_nodes:
n.support_count = 0
for dm in support:
_add_support_count(primary_internal_nodes, cluster(dm))
for n in primary_internal_nodes:
n.name = str(n.support_count / support_total)
del n.support_count
return primary
def _upgma(dm):
upper_triangle = dm.condensed_form()
linkage = scipy.cluster.hierarchy.average(upper_triangle)
tree = skbio.TreeNode.from_linkage_matrix(linkage, dm.ids)
tree.name = "root" # root_at_midpoint for _nj labels the root
return tree
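# Small illustrative sketch of the helper above: the 3x3 distances are
# made-up values, chosen so that 's1' and 's2' are the closest pair and end
# up as sisters in the resulting UPGMA tree.
def _upgma_sketch():
    import numpy as np
    data = np.array([[0.0, 0.2, 0.6],
                     [0.2, 0.0, 0.5],
                     [0.6, 0.5, 0.0]])
    dm = skbio.DistanceMatrix(data, ids=['s1', 's2', 's3'])
    return _upgma(dm).ascii_art()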
def _nj(dm):
# Negative branch lengths are strange, BUT we are clustering, not modeling
# evolution, so it's not necessarily a problem
nj = skbio.tree.nj(dm, disallow_negative_branch_length=False)
return nj.root_at_midpoint()
def _add_support_count(nodes, support):
# This isn't a fast or really good way to compute this, but it is obvious.
for n in nodes:
n_tips = {t.name for t in n.tips()}
corresponding_node = support.lca(n_tips)
if {t.name for t in corresponding_node.tips()} == n_tips:
# This node has the same tips as the lca of the support tree with
# the same tips, so there aren't any missing or extra nodes in the
# support's subtree. (Though the subtree's topology may differ.)
n.support_count += 1
def _jackknifed_emperor(primary_matrix, support_matrices, metadata):
primary_pcoa = pcoa(primary_matrix)
jackknifed_pcoa = list(map(pcoa, support_matrices))
df = metadata.to_dataframe()
return Emperor(primary_pcoa, df, jackknifed=jackknifed_pcoa, remote='.')
| bsd-3-clause |
michaelaye/scikit-image | doc/examples/plot_brief.py | 32 | 1879 | """
=======================
BRIEF binary descriptor
=======================
This example demonstrates the BRIEF binary description algorithm.
The descriptor consists of relatively few bits and can be computed using
a set of intensity difference tests. The short binary descriptor results
in low memory footprint and very efficient matching based on the Hamming
distance metric.
BRIEF does not provide rotation-invariance. Scale-invariance can be achieved by
detecting and extracting features at different scales.
"""
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_peaks, corner_harris,
plot_matches, BRIEF)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
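# Toy sketch of the Hamming-distance matching mentioned in the description
# above: BRIEF descriptors are bit strings, so the distance between two
# descriptors is simply the number of differing bits. The 8-bit descriptors
# below are made up for illustration.
def _hamming_sketch():
    import numpy as np
    d1 = np.array([1, 0, 1, 1, 0, 0, 1, 0], dtype=bool)
    d2 = np.array([1, 0, 0, 1, 0, 1, 1, 0], dtype=bool)
    return np.count_nonzero(d1 != d2)  # differs in two positions -> distance 2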
img1 = rgb2gray(data.astronaut())
tform = tf.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = tf.warp(img1, tform)
img3 = tf.rotate(img1, 25)
keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5)
extractor = BRIEF()
extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors
extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors
extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask]
descriptors3 = extractor.descriptors
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
fig, ax = plt.subplots(nrows=2, ncols=1)
plt.gray()
plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')
plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')
plt.show()
| bsd-3-clause |
ciyer/stockscape | src/python/Stockscape/stockscape/dsr.py | 1 | 6058 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
dsr.py
Utilities for the DeLong-Shiller Redux (dsr).
Created by Chandrasekhar Ramakrishnan on 2017-10-02.
Copyright (c) 2017 Chandrasekhar Ramakrishnan. All rights reserved.
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import statsmodels.formula.api as smf
def time_ticks(hop=10, start=1880, end=2020):
"""Standard tick points for DSR visualizations"""
return [str(y) for y in np.arange(start, end, hop)]
def periods_from_df(time_df):
"""Take a frame and return a breakdown of the consecutive periods represented in the frame."""
period = [time_df.index[0]]
periods = [period]
all_period_years = [time_df.index[0]]
for i in range(1, len(time_df.index)):
all_period_years.append(time_df.index[i])
if time_df.index[i - 1] + 1 == time_df.index[i]:
period.append(time_df.index[i])
else:
period = [time_df.index[i]]
periods.append(period)
period_labels = ["{}-{}".format(p[0], p[-1]) for p in periods]
return all_period_years, [(p, pl) for p, pl in zip(periods, period_labels)]
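# Illustrative use of periods_from_df on a made-up frame indexed by year:
# 2000 and 2001 are consecutive and form one period, 2003 stands alone.
def _periods_demo():
    demo = pd.DataFrame({'n': [12, 12, 12]}, index=[2000, 2001, 2003])
    years, periods = periods_from_df(demo)
    # years == [2000, 2001, 2003]
    # periods == [([2000, 2001], '2000-2001'), ([2003], '2003-2003')]
    return years, periods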
def cite_source(ax):
ax.annotate('Source: Robert Shiller', (1, 0), (-2, -30), fontsize=8,
xycoords='axes fraction', textcoords='offset points', va='bottom', ha='right')
def split_to_and_since_delong(df):
"""Split the frame into time periods that DeLong analyzed and those since his article.
:param df: The frame to split
:return: Tuple with (to_delong, since_delong)
"""
    to_delong_index = [d for d in df.index if d.year < 2004 or (d.year == 2004 and d.month < 6)]
    since_delong_index = [d for d in df.index if d.year > 2004 or (d.year == 2004 and d.month >= 6)]
return df.loc[to_delong_index], df.loc[since_delong_index]
def latest_index_label(ser_of_df):
"""Return a string for the latest date in the ser_of_df.
:param ser_of_df: The series or frame to process
:return: String (month(short) 'YY) for the date
"""
return ser_of_df.dropna().index[-1].strftime("%b '%y").lower()
def split_cape_threshold_years(df, threshold=25, period_col='period'):
"""Split the df into those years above (or equal) and years below the CAPE threshold.
In addition to splitting, add a columns labels the period.
:param df: The data frame to apply the threshold to
:param threshold: Defaults to 25
:return: (above_threshold with period and year columns, below_threshold)
"""
above_threshold = pd.DataFrame(df[df['cape'] >= threshold], copy=True)
above_threshold['year'] = [i.year for i in above_threshold.index]
above_threshold_years = above_threshold.groupby('year').count()
above_threshold_years = above_threshold_years[above_threshold_years['price'] > 1]
above_threshold_period_years, above_threshold_periods_and_labels = periods_from_df(above_threshold_years)
for period, label in above_threshold_periods_and_labels:
above_threshold.loc[above_threshold['year'].isin(period), period_col] = label
all_high_cape_period_years_set = set(above_threshold_period_years)
below_threshold = df.loc[[d for d in df.index if d.year not in all_high_cape_period_years_set]]
return above_threshold, below_threshold
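# Sketch of the threshold split on invented monthly data: CAPE sits above 25
# throughout 1998-1999 and below it afterwards, so those two years come back
# labelled as one high-CAPE period while the remaining years land in the
# below-threshold frame.
def _cape_threshold_demo():
    idx = pd.date_range('1998-01-01', periods=48, freq='M')
    cape = np.concatenate([np.full(24, 30.0), np.full(24, 20.0)])
    demo = pd.DataFrame({'cape': cape, 'price': np.linspace(90, 110, 48)},
                        index=idx)
    above, below = split_cape_threshold_years(demo, threshold=25)
    # above['period'].unique() == ['1998-1999']; below covers 2000 and 2001
    return above, below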
def loss_indices(*dfs):
"""Return the indices where any of the dfs experienced losses.
:param dfs: The data frames to analyze
:return: Unique indices in which some df had a loss
"""
li = np.concatenate([np.array(df[df['returns'] < 0].index) for df in dfs])
return np.unique(li)
def inversion_indices(df1, df2, column):
"""Return the indices in which df1[column] > df2[column]
:param df1: A data frame
:param df2: Another data frame
:param column: A shared column
:return: The indices where df1[column] > df2[column]
"""
return df1[df1[column] > df2[column]].index
class DsrStylePrefs(object):
"""Utility class for styles/preferences/palettes"""
def __init__(self):
self.figure_full_size = (10.0, 7.5)
self.figure_medium_size = (8.0, 5.5)
self.figure_small_size = (8.0, 3.75)
self.s_palette = sns.color_palette('Blues_r')[0:4]
self.b_palette = sns.color_palette('Purples_r')[0:4]
l_palette = sns.color_palette('Dark2')
self.l_palette = [l_palette[i] for i in [1, 5, 3, 4, 0]]
def use(self):
"""Applies styling to matplotlib """
if 'ciyer' in mpl.style.available:
plt.style.use(['seaborn-darkgrid', 'ciyer'])
plt.rcParams["figure.figsize"] = self.figure_full_size
class LinearModel(object):
"""Bundle the relevant information from a linear regression."""
def __init__(self, ind, dep, df, pred_range):
"""Build a linear model of data
:param ind: Independent variable
:param dep: Dependent variable
:param df: The frame to fit the model against
:param pred_range: The range to predict on.
"""
self.ind = ind
self.dep = dep
self.df = df
self.pred_range = pred_range
self.lm = None
self.predictions = None
def fit_and_predict(self):
self.lm = smf.ols(formula="{} ~ {}".format(self.dep, self.ind), data=self.df).fit()
preds_input = pd.DataFrame({self.ind: self.pred_range})
self.predictions = self.lm.predict(preds_input)
return self
@property
def x_intercept(self):
return (-1 * self.lm.params[0]) / self.lm.params[1]
@property
def rsquared(self):
return self.lm.rsquared
@property
def rsquared_computed(self):
"""Compute rsquared from the data"""
        preds_input = pd.DataFrame({self.ind: self.df[self.ind]})
preds = self.lm.predict(preds_input)
ss_res = np.sum(np.power((self.df[self.dep] - preds).dropna(), 2))
dep_mean = self.df[self.dep].mean()
ss_tot = np.sum(np.power((self.df[self.dep] - dep_mean).dropna(), 2))
return 1 - (ss_res / ss_tot)
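# Sketch of how LinearModel is meant to be used; the column names, the data
# and the prediction range are invented for illustration.
def _linear_model_demo():
    demo = pd.DataFrame({'cape': np.linspace(5, 45, 50),
                         'returns': np.linspace(0.12, -0.02, 50)})
    lm = LinearModel('cape', 'returns', demo, pred_range=np.arange(5, 46))
    lm.fit_and_predict()
    # lm.predictions holds the fitted returns over pred_range and
    # lm.x_intercept is the CAPE level at which predicted returns cross zero.
    return lm.rsquared, lm.x_intercept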
| bsd-3-clause |
musically-ut/statsmodels | statsmodels/discrete/discrete_margins.py | 19 | 25467 | #Splitting out maringal effects to see if they can be generalized
from statsmodels.compat.python import lzip, callable, range
import numpy as np
from scipy.stats import norm
from statsmodels.tools.decorators import cache_readonly, resettable_cache
#### margeff helper functions ####
#NOTE: todo marginal effects for group 2
# group 2 oprobit, ologit, gologit, mlogit, biprobit
def _check_margeff_args(at, method):
"""
Checks valid options for margeff
"""
if at not in ['overall','mean','median','zero','all']:
raise ValueError("%s not a valid option for `at`." % at)
if method not in ['dydx','eyex','dyex','eydx']:
raise ValueError("method is not understood. Got %s" % method)
def _check_discrete_args(at, method):
"""
Checks the arguments for margeff if the exogenous variables are discrete.
"""
if method in ['dyex','eyex']:
raise ValueError("%s not allowed for discrete variables" % method)
if at in ['median', 'zero']:
raise ValueError("%s not allowed for discrete variables" % at)
def _get_const_index(exog):
"""
Returns a boolean array of non-constant column indices in exog and
an scalar array of where the constant is or None
"""
effects_idx = exog.var(0) != 0
if np.any(~effects_idx):
const_idx = np.where(~effects_idx)[0]
else:
const_idx = None
return effects_idx, const_idx
def _isdummy(X):
"""
Given an array X, returns the column indices for the dummy variables.
Parameters
----------
X : array-like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 2, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _isdummy(X)
>>> ind
    array([0, 3, 4])
"""
X = np.asarray(X)
if X.ndim > 1:
ind = np.zeros(X.shape[1]).astype(bool)
max = (np.max(X, axis=0) == 1)
min = (np.min(X, axis=0) == 0)
remainder = np.all(X % 1. == 0, axis=0)
ind = min & max & remainder
if X.ndim == 1:
ind = np.asarray([ind])
return np.where(ind)[0]
def _get_dummy_index(X, const_idx):
dummy_ind = _isdummy(X)
dummy = True
# adjust back for a constant because effects doesn't have one
if const_idx is not None:
dummy_ind[dummy_ind > const_idx] -= 1
if dummy_ind.size == 0: # don't waste your time
dummy = False
dummy_ind = None # this gets passed to stand err func
return dummy_ind, dummy
def _iscount(X):
"""
Given an array X, returns the column indices for count variables.
Parameters
----------
X : array-like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 10, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _iscount(X)
>>> ind
    array([0, 3, 4])
"""
X = np.asarray(X)
remainder = np.logical_and(np.logical_and(np.all(X % 1. == 0, axis = 0),
X.var(0) != 0), np.all(X >= 0, axis=0))
dummy = _isdummy(X)
remainder = np.where(remainder)[0].tolist()
for idx in dummy:
remainder.remove(idx)
return np.array(remainder)
def _get_count_index(X, const_idx):
count_ind = _iscount(X)
count = True
# adjust back for a constant because effects doesn't have one
if const_idx is not None:
count_ind[count_ind > const_idx] -= 1
if count_ind.size == 0: # don't waste your time
count = False
count_ind = None # for stand err func
return count_ind, count
def _get_margeff_exog(exog, at, atexog, ind):
if atexog is not None: # user supplied
if isinstance(atexog, dict):
# assumes values are singular or of len(exog)
for key in atexog:
exog[:,key] = atexog[key]
elif isinstance(atexog, np.ndarray): #TODO: handle DataFrames
if atexog.ndim == 1:
k_vars = len(atexog)
else:
k_vars = atexog.shape[1]
try:
assert k_vars == exog.shape[1]
except:
raise ValueError("atexog does not have the same number "
"of variables as exog")
exog = atexog
#NOTE: we should fill in atexog after we process at
if at == 'mean':
exog = np.atleast_2d(exog.mean(0))
elif at == 'median':
exog = np.atleast_2d(np.median(exog, axis=0))
elif at == 'zero':
exog = np.zeros((1,exog.shape[1]))
exog[0,~ind] = 1
return exog
def _get_count_effects(effects, exog, count_ind, method, model, params):
"""
If there's a count variable, the predicted difference is taken by
subtracting one and adding one to exog then averaging the difference
"""
# this is the index for the effect and the index for count col in exog
for i in count_ind:
exog0 = exog.copy()
exog0[:, i] -= 1
effect0 = model.predict(params, exog0)
exog0[:, i] += 2
effect1 = model.predict(params, exog0)
#NOTE: done by analogy with dummy effects but untested bc
# stata doesn't handle both count and eydx anywhere
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = ((effect1 - effect0)/2)
return effects
def _get_dummy_effects(effects, exog, dummy_ind, method, model, params):
"""
If there's a dummy variable, the predicted difference is taken at
0 and 1
"""
# this is the index for the effect and the index for dummy col in exog
for i in dummy_ind:
exog0 = exog.copy() # only copy once, can we avoid a copy?
exog0[:,i] = 0
effect0 = model.predict(params, exog0)
#fittedvalues0 = np.dot(exog0,params)
exog0[:,i] = 1
effect1 = model.predict(params, exog0)
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = (effect1 - effect0)
return effects
def _effects_at(effects, at):
if at == 'all':
effects = effects
elif at == 'overall':
effects = effects.mean(0)
else:
effects = effects[0,:]
return effects
def _margeff_cov_params_dummy(model, cov_margins, params, exog, dummy_ind,
method, J):
"""
Returns the Jacobian for discrete regressors for use in margeff_cov_params.
For discrete regressors the marginal effect is
\Delta F = F(XB) | d = 1 - F(XB) | d = 0
The row of the Jacobian for this variable is given by
f(XB)*X | d = 1 - f(XB)*X | d = 0
Where F is the default prediction of the model.
"""
for i in dummy_ind:
exog0 = exog.copy()
exog1 = exog.copy()
exog0[:,i] = 0
exog1[:,i] = 1
dfdb0 = model._derivative_predict(params, exog0, method)
dfdb1 = model._derivative_predict(params, exog1, method)
dfdb = (dfdb1 - dfdb0)
if dfdb.ndim >= 2: # for overall
dfdb = dfdb.mean(0)
if J > 1:
K = dfdb.shape[1] // (J-1)
cov_margins[i::K, :] = dfdb
else:
cov_margins[i, :] = dfdb # how each F changes with change in B
return cov_margins
def _margeff_cov_params_count(model, cov_margins, params, exog, count_ind,
method, J):
"""
Returns the Jacobian for discrete regressors for use in margeff_cov_params.
For discrete regressors the marginal effect is
\Delta F = F(XB) | d += 1 - F(XB) | d -= 1
The row of the Jacobian for this variable is given by
(f(XB)*X | d += 1 - f(XB)*X | d -= 1) / 2
where F is the default prediction for the model.
"""
for i in count_ind:
exog0 = exog.copy()
exog0[:,i] -= 1
dfdb0 = model._derivative_predict(params, exog0, method)
exog0[:,i] += 2
dfdb1 = model._derivative_predict(params, exog0, method)
dfdb = (dfdb1 - dfdb0)
if dfdb.ndim >= 2: # for overall
dfdb = dfdb.mean(0) / 2
if J > 1:
            K = dfdb.shape[1] // (J-1)
cov_margins[i::K, :] = dfdb
else:
cov_margins[i, :] = dfdb # how each F changes with change in B
return cov_margins
def margeff_cov_params(model, params, exog, cov_params, at, derivative,
dummy_ind, count_ind, method, J):
"""
Computes the variance-covariance of marginal effects by the delta method.
Parameters
----------
model : model instance
The model that returned the fitted results. Its pdf method is used
for computing the Jacobian of discrete variables in dummy_ind and
count_ind
params : array-like
estimated model parameters
exog : array-like
exogenous variables at which to calculate the derivative
cov_params : array-like
The variance-covariance of the parameters
at : str
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation.
        Only 'overall' has any effect here.
derivative : function or array-like
If a function, it returns the marginal effects of the model with
respect to the exogenous variables evaluated at exog. Expected to be
called derivative(params, exog). This will be numerically
differentiated. Otherwise, it can be the Jacobian of the marginal
effects with respect to the parameters.
dummy_ind : array-like
Indices of the columns of exog that contain dummy variables
count_ind : array-like
Indices of the columns of exog that contain count variables
Notes
-----
For continuous regressors, the variance-covariance is given by
Asy. Var[MargEff] = [d margeff / d params] V [d margeff / d params]'
where V is the parameter variance-covariance.
The outer Jacobians are computed via numerical differentiation if
derivative is a function.
"""
if callable(derivative):
from statsmodels.tools.numdiff import approx_fprime_cs
params = params.ravel('F') # for Multinomial
try:
jacobian_mat = approx_fprime_cs(params, derivative,
args=(exog,method))
except TypeError: # norm.cdf doesn't take complex values
from statsmodels.tools.numdiff import approx_fprime
jacobian_mat = approx_fprime(params, derivative,
args=(exog,method))
if at == 'overall':
jacobian_mat = np.mean(jacobian_mat, axis=1)
else:
jacobian_mat = jacobian_mat.squeeze() # exog was 2d row vector
if dummy_ind is not None:
jacobian_mat = _margeff_cov_params_dummy(model, jacobian_mat,
params, exog, dummy_ind, method, J)
if count_ind is not None:
jacobian_mat = _margeff_cov_params_count(model, jacobian_mat,
params, exog, count_ind, method, J)
else:
jacobian_mat = derivative
#NOTE: this won't go through for at == 'all'
return np.dot(np.dot(jacobian_mat, cov_params), jacobian_mat.T)
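# Tiny numerical sketch of the delta-method formula in the Notes above: with
# a Jacobian of the marginal effects with respect to the parameters and a
# parameter covariance V, the covariance of the effects is J V J'. The
# numbers are arbitrary.
def _delta_method_sketch():
    jac = np.array([[0.5, 0.1],
                    [0.2, 0.3]])
    cov = np.array([[0.04, 0.01],
                    [0.01, 0.09]])
    cov_margeff = np.dot(np.dot(jac, cov), jac.T)
    return cov_margeff, np.sqrt(np.diag(cov_margeff))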
def margeff_cov_with_se(model, params, exog, cov_params, at, derivative,
dummy_ind, count_ind, method, J):
"""
See margeff_cov_params.
Same function but returns both the covariance of the marginal effects
and their standard errors.
"""
cov_me = margeff_cov_params(model, params, exog, cov_params, at,
derivative, dummy_ind,
count_ind, method, J)
return cov_me, np.sqrt(np.diag(cov_me))
def margeff():
pass
def _check_at_is_all(method):
if method['at'] == 'all':
raise NotImplementedError("Only margeff are available when `at` is "
"all. Please input specific points if you would like to "
"do inference.")
_transform_names = dict(dydx='dy/dx',
eyex='d(lny)/d(lnx)',
dyex='dy/d(lnx)',
eydx='d(lny)/dx')
class Margins(object):
"""
Mostly a do nothing class. Lays out the methods expected of a sub-class.
This is just a sketch of what we may want out of a general margins class.
I (SS) need to look at details of other models.
"""
def __init__(self, results, get_margeff, derivative, dist=None,
margeff_args=()):
self._cache = resettable_cache()
self.results = results
self.dist = dist
self.get_margeff(margeff_args)
def _reset(self):
self._cache = resettable_cache()
def get_margeff(self, *args, **kwargs):
self._reset()
self.margeff = self.get_margeff(*args)
@cache_readonly
def tvalues(self):
raise NotImplementedError
@cache_readonly
def cov_margins(self):
raise NotImplementedError
@cache_readonly
def margins_se(self):
raise NotImplementedError
def summary_frame(self):
raise NotImplementedError
@cache_readonly
def pvalues(self):
raise NotImplementedError
def conf_int(self, alpha=.05):
raise NotImplementedError
def summary(self, alpha=.05):
raise NotImplementedError
#class DiscreteMargins(Margins):
class DiscreteMargins(object):
"""Get marginal effects of a Discrete Choice model.
Parameters
----------
results : DiscreteResults instance
The results instance of a fitted discrete choice model
args : tuple
Args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
kwargs : dict
Keyword args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
"""
def __init__(self, results, args, kwargs={}):
self._cache = resettable_cache()
self.results = results
self.get_margeff(*args, **kwargs)
def _reset(self):
self._cache = resettable_cache()
@cache_readonly
def tvalues(self):
_check_at_is_all(self.margeff_options)
return self.margeff / self.margeff_se
def summary_frame(self, alpha=.05):
"""
Returns a DataFrame summarizing the marginal effects.
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
frame : DataFrames
A DataFrame summarizing the marginal effects.
"""
_check_at_is_all(self.margeff_options)
from pandas import DataFrame
names = [_transform_names[self.margeff_options['method']],
'Std. Err.', 'z', 'Pr(>|z|)',
                 'Conf. Int. Low', 'Conf. Int. Hi.']
ind = self.results.model.exog.var(0) != 0 # True if not a constant
exog_names = self.results.model.exog_names
var_names = [name for i,name in enumerate(exog_names) if ind[i]]
table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
self.pvalues, self.conf_int(alpha)))
return DataFrame(table, columns=names, index=var_names)
@cache_readonly
def pvalues(self):
_check_at_is_all(self.margeff_options)
return norm.sf(np.abs(self.tvalues)) * 2
def conf_int(self, alpha=.05):
"""
Returns the confidence intervals of the marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
conf_int : ndarray
An array with lower, upper confidence intervals for the marginal
effects.
"""
_check_at_is_all(self.margeff_options)
me_se = self.margeff_se
q = norm.ppf(1 - alpha / 2)
lower = self.margeff - q * me_se
upper = self.margeff + q * me_se
return np.asarray(lzip(lower, upper))
def summary(self, alpha=.05):
"""
Returns a summary table for marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
Summary : SummaryTable
A SummaryTable instance
"""
_check_at_is_all(self.margeff_options)
results = self.results
model = results.model
title = model.__class__.__name__ + " Marginal Effects"
method = self.margeff_options['method']
top_left = [('Dep. Variable:', [model.endog_names]),
('Method:', [method]),
('At:', [self.margeff_options['at']]),]
from statsmodels.iolib.summary import (Summary, summary_params,
table_extend)
exog_names = model.exog_names[:] # copy
smry = Summary()
# sigh, we really need to hold on to this in _data...
_, const_idx = _get_const_index(model.exog)
if const_idx is not None:
exog_names.pop(const_idx)
J = int(getattr(model, "J", 1))
if J > 1:
yname, yname_list = results._get_endog_name(model.endog_names,
None, all=True)
else:
yname = model.endog_names
yname_list = [yname]
smry.add_table_2cols(self, gleft=top_left, gright=[],
yname=yname, xname=exog_names, title=title)
#NOTE: add_table_params is not general enough yet for margeff
# could use a refactor with getattr instead of hard-coded params
# tvalues etc.
table = []
conf_int = self.conf_int(alpha)
margeff = self.margeff
margeff_se = self.margeff_se
tvalues = self.tvalues
pvalues = self.pvalues
if J > 1:
for eq in range(J):
restup = (results, margeff[:,eq], margeff_se[:,eq],
tvalues[:,eq], pvalues[:,eq], conf_int[:,:,eq])
tble = summary_params(restup, yname=yname_list[eq],
xname=exog_names, alpha=alpha, use_t=False,
skip_header=True)
tble.title = yname_list[eq]
# overwrite coef with method name
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|', '[%3.1f%% Conf. Int.]' % (100-alpha*100)]
tble.insert_header_row(0, header)
#from IPython.core.debugger import Pdb; Pdb().set_trace()
table.append(tble)
table = table_extend(table, keep_headers=True)
else:
restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)
table = summary_params(restup, yname=yname, xname=exog_names,
alpha=alpha, use_t=False, skip_header=True)
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|', '[%3.1f%% Conf. Int.]' % (100-alpha*100)]
table.insert_header_row(0, header)
smry.tables.append(table)
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is all
only margeff will be available.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semielasticity -- dy/d(lnx)
            - 'eydx' - estimate semielasticity -- d(lny)/dx
            Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : array-like, optional
Optionally, you can provide the exogenous variables over which to
get the marginal effects. This should be a dictionary with the key
as the zero-indexed column number and the value of the dictionary.
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
effects : ndarray
the marginal effect corresponding to the input options
Notes
-----
        When used after fitting a Poisson model, this returns the expected
        number of events per period, assuming that the model is loglinear.
"""
self._reset() # always reset the cache when this is called
#TODO: if at is not all or overall, we can also put atexog values
# in summary table head
method = method.lower()
at = at.lower()
_check_margeff_args(at, method)
self.margeff_options = dict(method=method, at=at)
results = self.results
model = results.model
params = results.params
exog = model.exog.copy() # copy because values are changed
effects_idx, const_idx = _get_const_index(exog)
if dummy:
_check_discrete_args(at, method)
dummy_idx, dummy = _get_dummy_index(exog, const_idx)
else:
dummy_idx = None
if count:
_check_discrete_args(at, method)
count_idx, count = _get_count_index(exog, const_idx)
else:
count_idx = None
# get the exogenous variables
exog = _get_margeff_exog(exog, at, atexog, effects_idx)
# get base marginal effects, handled by sub-classes
effects = model._derivative_exog(params, exog, method,
dummy_idx, count_idx)
J = getattr(model, 'J', 1)
effects_idx = np.tile(effects_idx, J) # adjust for multi-equation.
effects = _effects_at(effects, at)
if at == 'all':
if J > 1:
K = model.K - np.any(~effects_idx) # subtract constant
self.margeff = effects[:, effects_idx].reshape(-1, K, J,
order='F')
else:
self.margeff = effects[:, effects_idx]
else:
# Set standard error of the marginal effects by Delta method.
margeff_cov, margeff_se = margeff_cov_with_se(model, params, exog,
results.cov_params(), at,
model._derivative_exog,
dummy_idx, count_idx,
method, J)
# reshape for multi-equation
if J > 1:
K = model.K - np.any(~effects_idx) # subtract constant
self.margeff = effects[effects_idx].reshape(K, J, order='F')
self.margeff_se = margeff_se[effects_idx].reshape(K, J,
order='F')
self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
else:
# don't care about at constant
self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
self.margeff_se = margeff_se[effects_idx]
self.margeff = effects[effects_idx]
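# Sketch of the usual entry point for this class: calling get_margeff() on a
# fitted discrete-choice results object. The toy data below is made up, and
# the import is done lazily to avoid a circular import at module load time.
def _discrete_margins_demo():
    import statsmodels.api as sm
    rng = np.random.RandomState(0)
    exog = sm.add_constant(rng.randn(200, 2))
    endog = (exog[:, 1] + 0.5 * exog[:, 2] + rng.randn(200) > 0).astype(float)
    res = sm.Logit(endog, exog).fit(disp=0)
    margins = res.get_margeff(at='overall', method='dydx')
    return margins.summary_frame()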
| bsd-3-clause |
JosmanPS/scikit-learn | examples/gaussian_process/gp_diabetes_dataset.py | 223 | 1976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic squared exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without reperforming MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination using
# the cross_validation module using all CPUs available on the machine
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
| bsd-3-clause |
elisamussumeci/InfoDenguePredict | docs/conf.py | 1 | 8771 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/api")
module_dir = os.path.join(__location__, "../infodenguepredict")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'InfoDenguePredict'
copyright = u'2016, Flávio Codeço Coelho'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from infodenguepredict import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'infodenguepredict-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'InfoDenguePredict Documentation',
u'Flávio Codeço Coelho', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| gpl-3.0 |
florian-f/sklearn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non-metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
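# Convert the noisy distances into similarity values used below for the edge
# colouring: small distances map to large similarities; the diagonal
# (distance 0) becomes inf and is zeroed out on the next line.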
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/pylab_examples/line_collection.py | 12 | 1511 | import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter
import numpy as np
# In order to efficiently plot many lines in a single set of axes,
# Matplotlib has the ability to add the lines all at once. Here is a
# simple example showing how it is done.
x = np.arange(100)
# Here are many sets of y to plot vs x
ys = x[:50, np.newaxis] + x[np.newaxis, :]
segs = np.zeros((50, 100, 2), float)
segs[:,:,1] = ys
segs[:,:,0] = x
# Mask some values to test masked array support:
segs = np.ma.masked_where((segs > 50) & (segs < 60), segs)
# We need to set the plot limits.
ax = plt.axes()
ax.set_xlim(x.min(), x.max())
ax.set_ylim(ys.min(), ys.max())
# colors is sequence of rgba tuples
# linestyle is a string or dash tuple. Legal string values are
# solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq)
# where onoffseq is an even length tuple of on and off ink in points.
# If linestyle is omitted, 'solid' is used
# See matplotlib.collections.LineCollection for more information
line_segments = LineCollection(segs,
linewidths = (0.5,1,1.5,2),
colors = [colorConverter.to_rgba(i) \
for i in ('b','g','r','c','m','y','k')],
linestyle = 'solid')
ax.add_collection(line_segments)
ax.set_title('Line collection with masked arrays')
plt.show()
| gpl-2.0 |
wangmiao1981/spark | python/pyspark/ml/clustering.py | 15 | 62447 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.param.shared import HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, \
HasAggregationDepth, HasWeightCol, HasTol, HasProbabilityCol, HasDistanceMeasure, \
HasCheckpointInterval, Param, Params, TypeConverters
from pyspark.ml.util import JavaMLWritable, JavaMLReadable, GeneralJavaMLWritable, \
HasTrainingSummary, SparkContext
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.common import inherit_doc, _java2py
from pyspark.ml.stat import MultivariateGaussian
from pyspark.sql import DataFrame
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel', 'KMeansSummary',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
"""
Clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def predictionCol(self):
"""
Name for column of predicted clusters in `predictions`.
"""
return self._call_java("predictionCol")
@property
@since("2.1.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.1.0")
def featuresCol(self):
"""
Name for column of features in `predictions`.
"""
return self._call_java("featuresCol")
@property
@since("2.1.0")
def k(self):
"""
The number of clusters the model was trained with.
"""
return self._call_java("k")
@property
@since("2.1.0")
def cluster(self):
"""
DataFrame of predicted cluster centers for each training data point.
"""
return self._call_java("cluster")
@property
@since("2.1.0")
def clusterSizes(self):
"""
Size of (number of data points in) each cluster.
"""
return self._call_java("clusterSizes")
@property
@since("2.4.0")
def numIter(self):
"""
Number of iterations.
"""
return self._call_java("numIter")
@inherit_doc
class _GaussianMixtureParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
HasProbabilityCol, HasTol, HasAggregationDepth, HasWeightCol):
"""
Params for :py:class:`GaussianMixture` and :py:class:`GaussianMixtureModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
def __init__(self, *args):
super(_GaussianMixtureParams, self).__init__(*args)
self._setDefault(k=2, tol=0.01, maxIter=100, aggregationDepth=2)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
class GaussianMixtureModel(JavaModel, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("3.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("3.0.0")
def gaussians(self):
"""
Array of :py:class:`MultivariateGaussian` where gaussians[i] represents
the Multivariate Gaussian (Normal) Distribution for Gaussian i
"""
sc = SparkContext._active_spark_context
jgaussians = self._java_obj.gaussians()
return [
MultivariateGaussian(_java2py(sc, jgaussian.mean()), _java2py(sc, jgaussian.cov()))
for jgaussian in jgaussians]
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
@since("3.0.0")
def predictProbability(self, value):
"""
Predict probability for the given features.
"""
return self._call_java("predictProbability", value)
@inherit_doc
class GaussianMixture(JavaEstimator, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable):
"""
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
specifying each's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
.. versionadded:: 2.0.0
Notes
-----
For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)
>>> gm.getMaxIter()
100
>>> gm.setMaxIter(30)
GaussianMixture...
>>> gm.getMaxIter()
30
>>> model = gm.fit(df)
>>> model.getAggregationDepth()
2
>>> model.getFeaturesCol()
'features'
>>> model.setPredictionCol("newPrediction")
GaussianMixtureModel...
>>> model.predict(df.head().features)
2
>>> model.predictProbability(df.head().features)
DenseVector([0.0, 0.0, 1.0])
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
3
>>> summary.clusterSizes
[2, 2, 2]
>>> weights = model.weights
>>> len(weights)
3
>>> gaussians = model.gaussians
>>> len(gaussians)
3
>>> gaussians[0].mean
DenseVector([0.825, 0.8675])
>>> gaussians[0].cov
DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], 0)
>>> gaussians[1].mean
DenseVector([-0.87, -0.72])
>>> gaussians[1].cov
DenseMatrix(2, 2, [0.0016, 0.0016, 0.0016, 0.0016], 0)
>>> gaussians[2].mean
DenseVector([-0.055, -0.075])
>>> gaussians[2].cov
DenseMatrix(2, 2, [0.002, -0.0011, -0.0011, 0.0006], 0)
>>> model.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[4].newPrediction == rows[5].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.hasSummary
False
>>> model2.weights == model.weights
True
>>> model2.gaussians[0].mean == model.gaussians[0].mean
True
>>> model2.gaussians[0].cov == model.gaussians[0].cov
True
>>> model2.gaussians[1].mean == model.gaussians[1].mean
True
>>> model2.gaussians[1].cov == model.gaussians[1].cov
True
>>> model2.gaussians[2].mean == model.gaussians[2].mean
True
>>> model2.gaussians[2].cov == model.gaussians[2].cov
True
>>> model2.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model2.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
>>> gm2.setWeightCol("weight")
GaussianMixture...
"""
@keyword_only
def __init__(self, *, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
aggregationDepth=2, weightCol=None):
"""
__init__(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
aggregationDepth=2, weightCol=None)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
aggregationDepth=2, weightCol=None):
"""
setParams(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
aggregationDepth=2, weightCol=None)
Sets params for GaussianMixture.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("2.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("2.0.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("3.0.0")
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
class GaussianMixtureSummary(ClusteringSummary):
"""
Gaussian mixture clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def probabilityCol(self):
"""
Name for column of predicted probability of each cluster in `predictions`.
"""
return self._call_java("probabilityCol")
@property
@since("2.1.0")
def probability(self):
"""
DataFrame of probabilities of each cluster for each training data point.
"""
return self._call_java("probability")
@property
@since("2.2.0")
def logLikelihood(self):
"""
Total log-likelihood for this model on the given data.
"""
return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
"""
Summary of KMeans.
.. versionadded:: 2.1.0
"""
@property
@since("2.4.0")
def trainingCost(self):
"""
K-means cost (sum of squared distances to the nearest centroid for all points in the
training dataset). This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
@inherit_doc
class _KMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, HasTol,
HasDistanceMeasure, HasWeightCol):
"""
Params for :py:class:`KMeans` and :py:class:`KMeansModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
def __init__(self, *args):
super(_KMeansParams, self).__init__(*args)
self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
distanceMeasure="euclidean")
@since("1.5.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
class KMeansModel(JavaModel, _KMeansParams, GeneralJavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(super(KMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
@inherit_doc
class KMeans(JavaEstimator, _KMeansParams, JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
.. versionadded:: 1.5.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]), 2.0), (Vectors.dense([1.0, 1.0]), 2.0),
... (Vectors.dense([9.0, 8.0]), 2.0), (Vectors.dense([8.0, 9.0]), 2.0)]
>>> df = spark.createDataFrame(data, ["features", "weighCol"])
>>> kmeans = KMeans(k=2)
>>> kmeans.setSeed(1)
KMeans...
>>> kmeans.setWeightCol("weighCol")
KMeans...
>>> kmeans.setMaxIter(10)
KMeans...
>>> kmeans.getMaxIter()
10
>>> kmeans.clear(kmeans.maxIter)
>>> model = kmeans.fit(df)
>>> model.getDistanceMeasure()
'euclidean'
>>> model.setPredictionCol("newPrediction")
KMeansModel...
>>> model.predict(df.head().features)
0
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[0].newPrediction == rows[1].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
4.0
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean", weightCol=None):
"""
__init__(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean", weightCol=None)
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, *, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean", weightCol=None):
"""
setParams(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean", weightCol=None)
Sets params for KMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("1.5.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("1.5.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("1.5.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("1.5.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("1.5.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@inherit_doc
class _BisectingKMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
HasDistanceMeasure, HasWeightCol):
"""
Params for :py:class:`BisectingKMeans` and :py:class:`BisectingKMeansModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
def __init__(self, *args):
super(_BisectingKMeansParams, self).__init__(*args)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
class BisectingKMeansModel(JavaModel, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
.. deprecated:: 3.0.0
It will be removed in future versions. Use :py:class:`ClusteringEvaluator` instead.
You can also get the cost on the training dataset in the summary.
"""
warnings.warn("Deprecated in 3.0.0. It will be removed in future versions. Use "
"ClusteringEvaluator instead. You can also get the cost on the training "
"dataset in the summary.", FutureWarning)
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
@inherit_doc
class BisectingKMeans(JavaEstimator, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
    If bisecting all divisible clusters on the bottom level would result in more than `k` leaf
clusters, larger clusters get higher priority.
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]), 2.0), (Vectors.dense([1.0, 1.0]), 2.0),
... (Vectors.dense([9.0, 8.0]), 2.0), (Vectors.dense([8.0, 9.0]), 2.0)]
>>> df = spark.createDataFrame(data, ["features", "weighCol"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> bkm.setMaxIter(10)
BisectingKMeans...
>>> bkm.getMaxIter()
10
>>> bkm.clear(bkm.maxIter)
>>> bkm.setSeed(1)
BisectingKMeans...
>>> bkm.setWeightCol("weighCol")
BisectingKMeans...
>>> bkm.getSeed()
1
>>> bkm.clear(bkm.seed)
>>> model = bkm.fit(df)
>>> model.getMaxIter()
20
>>> model.setPredictionCol("newPrediction")
BisectingKMeansModel...
>>> model.predict(df.head().features)
0
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.0
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
4.000...
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[0].newPrediction == rows[1].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> bkm2.getDistanceMeasure()
'euclidean'
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean",
weightCol=None):
"""
__init__(self, \\*, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean", \
weightCol=None)
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean",
weightCol=None):
"""
setParams(self, \\*, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean", \
weightCol=None)
Sets params for BisectingKMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("2.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
"""
Bisecting KMeans clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("3.0.0")
def trainingCost(self):
"""
Sum of squared distances to the nearest centroid for all points in the training dataset.
This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
@inherit_doc
class _LDAParams(HasMaxIter, HasFeaturesCol, HasSeed, HasCheckpointInterval):
"""
Params for :py:class:`LDA` and :py:class:`LDAModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
    learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an "
"exponential decay rate. This should be between (0.5, 1.0] to "
"guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
    topicConcentration = Param(Params._dummy(), "topicConcentration",
                               "Concentration parameter (commonly named \"beta\" or \"eta\") for "
                               "the prior placed on topics' distributions over terms.",
                               typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
TypeConverters.toBoolean)
def __init__(self, *args):
super(_LDAParams, self).__init__(*args)
self._setDefault(maxIter=20, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
@since("2.0.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def getOptimizer(self):
"""
Gets the value of :py:attr:`optimizer` or its default value.
"""
return self.getOrDefault(self.optimizer)
@since("2.0.0")
def getLearningOffset(self):
"""
Gets the value of :py:attr:`learningOffset` or its default value.
"""
return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def getLearningDecay(self):
"""
Gets the value of :py:attr:`learningDecay` or its default value.
"""
return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def getSubsamplingRate(self):
"""
Gets the value of :py:attr:`subsamplingRate` or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def getOptimizeDocConcentration(self):
"""
Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
"""
return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def getDocConcentration(self):
"""
Gets the value of :py:attr:`docConcentration` or its default value.
"""
return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def getTopicConcentration(self):
"""
Gets the value of :py:attr:`topicConcentration` or its default value.
"""
return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def getTopicDistributionCol(self):
"""
Gets the value of :py:attr:`topicDistributionCol` or its default value.
"""
return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def getKeepLastCheckpoint(self):
"""
Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
"""
return self.getOrDefault(self.keepLastCheckpoint)
@inherit_doc
class LDAModel(JavaModel, _LDAParams):
"""
Latent Dirichlet Allocation (LDA) model.
    This abstraction allows for different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
.. warning:: If this model is actually a :py:class:`DistributedLDAModel`
instance produced by the Expectation-Maximization ("em") `optimizer`,
then this method could involve collecting a large amount of data
to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
.. warning:: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
Calculate an upper bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
.. warning:: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
.. warning:: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
model = LocalLDAModel(self._call_java("toLocal"))
# SPARK-10931: Temporary fix to be removed once LDAModel defines Params
model._create_params_from_java()
model._transfer_params_from_java()
return model
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes
-----
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
.. versionadded:: 2.0.0
Returns
-------
list
List of checkpoint files from training
Notes
-----
Removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
the checkpoints when this model and derivative data go out of scope.
"""
return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, _LDAParams, JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> lda.setMaxIter(10)
LDA...
>>> lda.getMaxIter()
10
>>> lda.clear(lda.maxIter)
>>> model = lda.fit(df)
>>> model.setSeed(1)
DistributedLDAModel...
>>> model.getTopicDistributionCol()
'topicDistribution'
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
>>> model.transform(df).take(1) == sameLocalModel.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
__init__(self, \\*, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
"""
super(LDA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
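        # The EM optimizer produces a DistributedLDAModel (which keeps the topic
        # distribution for each training document); the online optimizer produces
        # a LocalLDAModel.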
if self.getOptimizer() == "em":
return DistributedLDAModel(java_model)
else:
return LocalLDAModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
setParams(self, \\*, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
Sets params for LDA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = LDA().setK(10)
>>> algo.getK()
10
"""
return self._set(k=value)
@since("2.0.0")
def setOptimizer(self, value):
"""
Sets the value of :py:attr:`optimizer`.
        Currently only 'em' and 'online' are supported.
Examples
--------
>>> algo = LDA().setOptimizer("em")
>>> algo.getOptimizer()
'em'
"""
return self._set(optimizer=value)
@since("2.0.0")
def setLearningOffset(self, value):
"""
Sets the value of :py:attr:`learningOffset`.
Examples
--------
>>> algo = LDA().setLearningOffset(100)
>>> algo.getLearningOffset()
100.0
"""
return self._set(learningOffset=value)
@since("2.0.0")
def setLearningDecay(self, value):
"""
Sets the value of :py:attr:`learningDecay`.
Examples
--------
>>> algo = LDA().setLearningDecay(0.1)
>>> algo.getLearningDecay()
0.1...
"""
return self._set(learningDecay=value)
@since("2.0.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
Examples
--------
>>> algo = LDA().setSubsamplingRate(0.1)
>>> algo.getSubsamplingRate()
0.1...
"""
return self._set(subsamplingRate=value)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
"""
Sets the value of :py:attr:`optimizeDocConcentration`.
Examples
--------
>>> algo = LDA().setOptimizeDocConcentration(True)
>>> algo.getOptimizeDocConcentration()
True
"""
return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def setDocConcentration(self, value):
"""
Sets the value of :py:attr:`docConcentration`.
Examples
--------
>>> algo = LDA().setDocConcentration([0.1, 0.2])
>>> algo.getDocConcentration()
[0.1..., 0.2...]
"""
return self._set(docConcentration=value)
@since("2.0.0")
def setTopicConcentration(self, value):
"""
Sets the value of :py:attr:`topicConcentration`.
Examples
--------
>>> algo = LDA().setTopicConcentration(0.5)
>>> algo.getTopicConcentration()
0.5...
"""
return self._set(topicConcentration=value)
@since("2.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
Examples
--------
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
"""
Sets the value of :py:attr:`keepLastCheckpoint`.
Examples
--------
>>> algo = LDA().setKeepLastCheckpoint(False)
>>> algo.getKeepLastCheckpoint()
False
"""
return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@inherit_doc
class _PowerIterationClusteringParams(HasMaxIter, HasWeightCol):
"""
Params for :py:class:`PowerIterationClustering`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k",
"The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either " +
"'random' to use a random vector as vertex properties, or 'degree' to use " +
"a normalized sum of similarities with other vertices. Supported options: " +
"'random' and 'degree'.",
typeConverter=TypeConverters.toString)
srcCol = Param(Params._dummy(), "srcCol",
"Name of the input column for source vertex IDs.",
typeConverter=TypeConverters.toString)
dstCol = Param(Params._dummy(), "dstCol",
"Name of the input column for destination vertex IDs.",
typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_PowerIterationClusteringParams, self).__init__(*args)
self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
@since("2.4.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.4.0")
def getInitMode(self):
"""
Gets the value of :py:attr:`initMode` or its default value.
"""
return self.getOrDefault(self.initMode)
@since("2.4.0")
def getSrcCol(self):
"""
Gets the value of :py:attr:`srcCol` or its default value.
"""
return self.getOrDefault(self.srcCol)
@since("2.4.0")
def getDstCol(self):
"""
Gets the value of :py:attr:`dstCol` or its default value.
"""
return self.getOrDefault(self.dstCol)
@inherit_doc
class PowerIterationClustering(_PowerIterationClusteringParams, JavaParams, JavaMLReadable,
JavaMLWritable):
"""
Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
`Lin and Cohen <http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf>`_. From the
abstract: PIC finds a very low-dimensional embedding of a dataset using truncated power
iteration on a normalized pair-wise similarity matrix of the data.
    This class is not yet an Estimator/Transformer; use the :py:func:`assignClusters` method
    to run the PowerIterationClustering algorithm.
.. versionadded:: 2.4.0
Notes
-----
See `Wikipedia on Spectral clustering <http://en.wikipedia.org/wiki/Spectral_clustering>`_
Examples
--------
>>> data = [(1, 0, 0.5),
... (2, 0, 0.5), (2, 1, 0.7),
... (3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9),
... (4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1),
... (5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
>>> df = spark.createDataFrame(data).toDF("src", "dst", "weight").repartition(1)
>>> pic = PowerIterationClustering(k=2, weightCol="weight")
>>> pic.setMaxIter(40)
PowerIterationClustering...
>>> assignments = pic.assignClusters(df)
>>> assignments.sort(assignments.id).show(truncate=False)
+---+-------+
|id |cluster|
+---+-------+
|0 |0 |
|1 |0 |
|2 |0 |
|3 |0 |
|4 |0 |
|5 |1 |
+---+-------+
...
>>> pic_path = temp_path + "/pic"
>>> pic.save(pic_path)
>>> pic2 = PowerIterationClustering.load(pic_path)
>>> pic2.getK()
2
>>> pic2.getMaxIter()
40
>>> pic2.assignClusters(df).take(6) == assignments.take(6)
True
"""
@keyword_only
def __init__(self, *, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
__init__(self, \\*, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
"""
super(PowerIterationClustering, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.4.0")
def setParams(self, *, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
setParams(self, \\*, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
Sets params for PowerIterationClustering.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.4.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.4.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("2.4.0")
def setSrcCol(self, value):
"""
Sets the value of :py:attr:`srcCol`.
"""
return self._set(srcCol=value)
@since("2.4.0")
def setDstCol(self, value):
"""
Sets the value of :py:attr:`dstCol`.
"""
return self._set(dstCol=value)
@since("2.4.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.4.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.4.0")
def assignClusters(self, dataset):
"""
Run the PIC algorithm and returns a cluster assignment for each input vertex.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
A dataset with columns src, dst, weight representing the affinity matrix,
which is the matrix A in the PIC paper. Suppose the src column value is i,
            the dst column value is j, the weight column value is similarity s_ij
            which must be nonnegative. This is a symmetric matrix and hence
            s_ij = s_ji. For any (i, j) with nonzero similarity, there should be
            either (i, j, s_ij) or (j, i, s_ji) in the input. Rows with i = j are
            ignored, because we assume s_ij = 0.0.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
A dataset that contains columns of vertex id and the corresponding cluster for
the id. The schema of it will be:
- id: Long
- cluster: Int
"""
self._transfer_params_to_java()
jdf = self._java_obj.assignClusters(dataset._jdf)
return DataFrame(jdf, dataset.sql_ctx)
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.clustering
from pyspark.sql import SparkSession
try:
        # NumPy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| apache-2.0 |
jviada/QuantEcon.py | examples/qs.py | 7 | 1456 |
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
from matplotlib import cm
xmin, xmax = -4, 12
x = 10
alpha = 0.5
m, v = x, 10
xgrid = np.linspace(xmin, xmax, 200)
fig, ax = plt.subplots()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data', 0))
ax.set_ylim(-0.05, 0.5)
ax.set_xticks((x,))
ax.set_xticklabels((r'$x$',), fontsize=18)
ax.set_yticks(())
K = 3
for i in range(K):
m = alpha * m
v = alpha * alpha * v + 1
f = norm(loc=m, scale=np.sqrt(v))
k = (i + 0.5) / K
ax.plot(xgrid, f.pdf(xgrid), lw=1, color='black', alpha=0.4)
ax.fill_between(xgrid, 0 * xgrid, f.pdf(xgrid), color=cm.jet(k), alpha=0.4)
ax.annotate(r'$Q(x,\cdot)$', xy=(6.6, 0.2), xycoords='data',
xytext=(20, 90), textcoords='offset points', fontsize=16,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2"))
ax.annotate(r'$Q^2(x,\cdot)$', xy=(3.6, 0.24), xycoords='data',
xytext=(20, 90), textcoords='offset points', fontsize=16,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2"))
ax.annotate(r'$Q^3(x,\cdot)$', xy=(-0.2, 0.28), xycoords='data',
xytext=(-90, 90), textcoords='offset points', fontsize=16,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0.2"))
fig.show()
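# Note on the figure (added commentary, not in the original script): the loop
# above iterates m <- alpha * m and v <- alpha**2 * v + 1 starting from
# (m, v) = (x, 10) and plots the Gaussian density N(m, v) after each of the
# K = 3 updates; these are the shaded curves annotated Q(x,.), Q^2(x,.) and
# Q^3(x,.).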
| bsd-3-clause |
miklos1/fayette | generate.py | 1 | 3091 | import os
import numpy
import pandas
assembly = pandas.read_csv("assembly.csv")
assembly["rate"] = assembly.num_dofs / assembly.parloop_time
matvec = pandas.read_csv("matvec.csv")
matvec["rate"] = matvec.num_dofs / matvec.matvec_time
matfree = pandas.read_csv("matfree.csv")
matfree["rate"] = matfree.num_dofs / matfree.matmult_time
print('Files read.')
outdir = "data"
os.makedirs(outdir, exist_ok=True)
mutate = {"poisson": "poisson",
"hyperelasticity": "hyperelastic",
"curl_curl": "curlcurl",
"stokes_momentum": "stokes_momentum"}
def curve(dataset, problem, config, exp, prefix=""):
name = problem
if config == "base":
mode = "coffee"
elif config == "spectral":
mode = "spectral"
elif config == "underintegration":
problem = {"poisson": "poisson_gll"}[problem]
mode = "spectral"
elif config == "spmv":
pass
else:
assert False, "Unexpected configuration!"
filtered = dataset.loc[lambda r: r.problem == problem]
if config != "spmv":
filtered = filtered.loc[lambda r: r.tsfc_mode == mode]
num_procs, = set(filtered["num_procs"])
series = filtered.groupby(["degree"]).mean()["rate"]
series.to_csv("%s/%s%s_%s.csv" % (outdir, prefix, mutate[name], config), header=True)
array = numpy.array(list(series.to_dict().items()))
x = array[:, 0]
y = array[:, 1]
logC = numpy.log(y) - numpy.log(x**3 / (x+1)**exp)
rho = logC.std() / logC.mean()
if rho > 0.1:
print(problem, config, 'rho =', rho)
C = numpy.exp(logC.mean())
return C, int(numpy.floor(x.min())), int(numpy.ceil(x.max()))
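# Added commentary (not in the original script): curve() assumes the measured
# rate behaves like C * p**3 / (p + 1)**exp, where p is the polynomial degree.
# logC holds the per-degree estimates of log(C), rho warns when those estimates
# vary by more than roughly 10%, and the returned tuple is
# (C, min degree, max degree) for the .dat files written below.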
def linear(problem):
with open("%s/%s.dat" % (outdir, mutate[problem]), 'w') as f:
print('C a b', file=f)
C, a, b = curve(matvec, problem, "spmv", 6)
# print(C, a, b, file=f)
C, a, b = curve(matfree, problem, "base", 6)
print(C, a, b, file=f)
C, a, b = curve(matfree, problem, "spectral", 4)
print(C, a, b, file=f)
def bilinear(problem):
with open("%s/bi%s.dat" % (outdir, mutate[problem]), 'w') as f:
print('C a b', file=f)
C, a, b = curve(assembly, problem, "base", 9, prefix="bi")
print(C, a, b, file=f)
C, a, b = curve(assembly, problem, "spectral", 7, prefix="bi")
print(C, a, b, file=f)
def bilinear_poisson():
with open("%s/bipoisson.dat" % (outdir,), 'w') as f:
print('C a b', file=f)
C, a, b = curve(assembly, "poisson", "base", 9, prefix="bi")
print(C, a, b, file=f)
C, a, b = curve(assembly, "poisson", "spectral", 7, prefix="bi")
print(C, a, b, file=f)
C, a, b = curve(assembly, "poisson", "underintegration", 6, prefix="bi")
print(C, a, b, file=f)
def bilinear_stokes_momentum():
curve(assembly, "stokes_momentum", "base", 9)
curve(assembly, "stokes_momentum", "spectral", 9)
bilinear_stokes_momentum()
bilinear_poisson()
bilinear("hyperelasticity")
bilinear("curl_curl")
linear("poisson")
linear("hyperelasticity")
linear("curl_curl")
| mit |
waterponey/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 86 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count 2 times as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
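# Illustrative cross-check (added, not part of the original example): soft voting
# is a weighted average of the individual predict_proba outputs, so recomputing
# it by hand with the same weights [2, 1, 2] should closely match
# eclf.predict_proba for the same sample.
manual_proba = np.average(
    [clf.predict_proba(X[:1]) for clf in (clf1, clf2, clf3)],
    axis=0, weights=[2, 1, 2])
print("manual weighted average:", manual_proba)
print("eclf.predict_proba:     ", eclf.predict_proba(X[:1]))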
| bsd-3-clause |
vitaliykomarov/NEUCOGAR | nest/noradrenaline/nest-2.10.0/pynest/examples/structural_plasticity.py | 5 | 12902 | # -*- coding: utf-8 -*-
#
# structural_plasticity.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Structural Plasticity example
-----------------------------
This example shows a simple network of two populations where structural plasticity
is used. The network has 1000 neurons, 80% excitatory and 20% inhibitory. The
simulation starts without any connectivity. A set of homeostatic rules are defined,
according to which structural plasticity will create and delete synapses dynamically
during the simulation until a desired level of electrical activity is reached. The
model of structural plasticity used here corresponds to the formulation presented
in Butz, M., & van Ooyen, A. (2013). A simple rule for dendritic spine and axonal
bouton formation can account for cortical reorganization after focal retinal
lesions. PLoS Comput. Biol. 9 (10), e1003259.
At the end of the simulation, a plot of the evolution of the connectivity in the network
and the average calcium concentration in the neurons is created.
'''
'''
First, we import all necessary modules.
'''
import nest
import numpy
import matplotlib.pyplot as pl
import sys
class StructralPlasticityExample:
def __init__(self):
'''
We define general simulation parameters
'''
# simulated time (ms)
self.t_sim = 200000.0
# simulation step (ms).
self.dt = 0.1
self.number_excitatory_neurons = 800
self.number_inhibitory_neurons = 200
# Structural_plasticity properties
self.update_interval = 1000
self.record_interval = 1000.0
# rate of background Poisson input
self.bg_rate = 10000.0
self.neuron_model = 'iaf_psc_exp'
'''
In this implementation of structural plasticity, neurons grow connection points
called synaptic elements. Synapses can be created between compatible synaptic
elements. The growth of these elements is guided by homeostatic rules, defined
as growth curves.
Here we specify the growth curves for synaptic elements of excitatory and inhibitory
neurons.
'''
# Excitatory synaptic elements of excitatory neurons
self.growth_curve_e_e = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': 0.05, # Ca2+
}
# Inhibitory synaptic elements of excitatory neurons
self.growth_curve_e_i = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': self.growth_curve_e_e['eps'], # Ca2+
}
# Excitatory synaptic elements of inhibitory neurons
self.growth_curve_i_e = {
'growth_curve': "gaussian",
'growth_rate': 0.0004, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': 0.2, # Ca2+
}
# Inhibitory synaptic elements of inhibitory neurons
self.growth_curve_i_i = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': self.growth_curve_i_e['eps'] # Ca2+
}
'''
Now we specify the neuron model
'''
self.model_params = {'tau_m': 10.0, # membrane time constant (ms)
'tau_syn_ex': 0.5, # excitatory synaptic time constant (ms)
'tau_syn_in': 0.5, # inhibitory synaptic time constant (ms)
't_ref': 2.0, # absolute refractory period (ms)
'E_L': -65.0, # resting membrane potential (mV)
'V_th': -50.0, # spike threshold (mV)
'C_m': 250.0, # membrane capacitance (pF)
'V_reset': -65.0, # reset potential (mV)
}
self.nodes_e = None
self.nodes_i = None
self.mean_ca_e = []
self.mean_ca_i = []
self.total_connections_e = []
self.total_connections_i = []
'''
We initialize variables for the post-synaptic currents of the excitatory, inhibitory and
external synapses. These values were calculated from a PSP amplitude of 1 for excitatory
synapses, -1 for inhibitory synapses and 0.11 for external synapses.
'''
self.psc_e = 585.0
self.psc_i = -585.0
self.psc_ext = 6.2
def prepare_simulation(self):
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
'''
We set global kernel parameters. Here we define the resolution for the simulation,
which is also the time resolution for the update of the synaptic elements.
'''
nest.SetKernelStatus(
{
'resolution': self.dt
}
)
'''
Set Structural Plasticity synaptic update interval which is how often the connectivity
will be updated inside the network. It is important to notice that synaptic elements
and connections change on different time scales.
'''
nest.SetStructuralPlasticityStatus({
'structural_plasticity_update_interval': self.update_interval,
})
'''
Now we define Structural Plasticity synapses. In this example we create two
synapse models, one for excitatory and one for inhibitory synapses. Then
we define that excitatory synapses can only be created between a
pre synaptic element called 'Axon_ex' and a post synaptic element called
Den_ex. In a similar manner, synaptic elements for inhibitory synapses
are defined.
'''
nest.CopyModel('static_synapse', 'synapse_ex')
nest.SetDefaults('synapse_ex', {'weight': self.psc_e, 'delay': 1.0})
nest.CopyModel('static_synapse', 'synapse_in')
nest.SetDefaults('synapse_in', {'weight': self.psc_i, 'delay': 1.0})
nest.SetStructuralPlasticityStatus({
'structural_plasticity_synapses': {
'synapse_ex': {
'model': 'synapse_ex',
'post_synaptic_element': 'Den_ex',
'pre_synaptic_element': 'Axon_ex',
},
'synapse_in': {
'model': 'synapse_in',
'post_synaptic_element': 'Den_in',
'pre_synaptic_element': 'Axon_in',
},
}
})
def create_nodes(self):
'''
Now we assign the growth curves to the corresponding synaptic elements
'''
synaptic_elements = {
'Den_ex': self.growth_curve_e_e,
'Den_in': self.growth_curve_e_i,
'Axon_ex': self.growth_curve_e_e,
}
synaptic_elements_i = {
'Den_ex': self.growth_curve_i_e,
'Den_in': self.growth_curve_i_i,
'Axon_in': self.growth_curve_i_i,
}
'''
        Then it is time to create a population of excitatory neurons (80% of the total
        network size) and another one of inhibitory neurons (20% of the total network size)
'''
self.nodes_e = nest.Create('iaf_neuron', self.number_excitatory_neurons, {
'synaptic_elements': synaptic_elements
})
self.nodes_i = nest.Create('iaf_neuron', self.number_inhibitory_neurons, {
'synaptic_elements': synaptic_elements_i
})
nest.SetStatus(self.nodes_e, 'synaptic_elements', synaptic_elements)
nest.SetStatus(self.nodes_i, 'synaptic_elements', synaptic_elements_i)
def connect_external_input(self):
'''
We create and connect the Poisson generator for external input
'''
noise = nest.Create('poisson_generator')
nest.SetStatus(noise, {"rate": self.bg_rate})
nest.Connect(noise, self.nodes_e, 'all_to_all', {'weight': self.psc_ext, 'delay': 1.0})
nest.Connect(noise, self.nodes_i, 'all_to_all', {'weight': self.psc_ext, 'delay': 1.0})
'''
In order to save the amount of average calcium concentration in each population through time
we create the function record_ca. Here we use the GetStatus function to retrieve the
value of Ca for every neuron in the network and then store the average.
'''
def record_ca(self):
        ca_e = nest.GetStatus(self.nodes_e, 'Ca')  # Calcium concentration
        self.mean_ca_e.append(numpy.mean(ca_e))
        ca_i = nest.GetStatus(self.nodes_i, 'Ca')  # Calcium concentration
        self.mean_ca_i.append(numpy.mean(ca_i))
'''
In order to save the state of the connectivity in the network through time
we create the function record_connectivity. Here we use the GetStatus function to retrieve the
number of connected pre synaptic elements of each neuron. The total amount of excitatory connections
is equal to the total amount of connected excitatory pre synaptic elements. The same applies for
inhibitory connections.
'''
def record_connectivity(self):
syn_elems_e = nest.GetStatus(self.nodes_e, 'synaptic_elements')
syn_elems_i = nest.GetStatus(self.nodes_i, 'synaptic_elements')
self.total_connections_e.append(sum(neuron['Axon_ex']['z_connected'] for neuron in syn_elems_e))
self.total_connections_i.append(sum(neuron['Axon_in']['z_connected'] for neuron in syn_elems_i))
'''
We define a function to plot the recorded values at the end of the simulation.
'''
def plot_data(self):
fig, ax1 = pl.subplots()
ax1.axhline(self.growth_curve_e_e['eps'], linewidth=4.0, color='#9999FF')
ax1.plot(self.mean_ca_e, 'b', label='Ca Concentration Excitatory Neurons', linewidth=2.0)
ax1.axhline(self.growth_curve_i_e['eps'], linewidth=4.0, color='#FF9999')
ax1.plot(self.mean_ca_i, 'r', label='Ca Concentration Inhibitory Neurons', linewidth=2.0)
ax1.set_ylim([0, 0.275])
ax1.set_xlabel("Time in [s]")
ax1.set_ylabel("Ca concentration")
ax2 = ax1.twinx()
ax2.plot(self.total_connections_e, 'm', label='Excitatory connections', linewidth=2.0, linestyle='--')
ax2.plot(self.total_connections_i, 'k', label='Inhibitory connections', linewidth=2.0, linestyle='--')
ax2.set_ylim([0, 2500])
ax2.set_ylabel("Connections")
ax1.legend(loc=1)
ax2.legend(loc=4)
pl.savefig('StructuralPlasticityExample.eps', format='eps')
'''
It is time to specify how we want to perform the simulation. In this function we first enable structural
plasticity in the network and then we simulate in steps. On each step we record the calcium concentration
and the connectivity. At the end of the simulation, the plot of connections and calcium concentration
through time is generated.
'''
def simulate(self):
if nest.NumProcesses() > 1:
sys.exit("For simplicity, this example only works for a single process.")
nest.EnableStructuralPlasticity()
print("Starting simulation")
sim_steps = numpy.arange(0, self.t_sim, self.record_interval)
for i, step in enumerate(sim_steps):
nest.Simulate(self.record_interval)
self.record_ca()
self.record_connectivity()
if i%20 == 0:
print( "Progress: " + str(i/2) + "%" )
print("Simulation finished successfully")
'''
Finally we take all the functions that we have defined and create the sequence for our example.
We prepare the simulation, create the nodes for the network, connect the external input and
then simulate. Please note that as we are simulating 200 biological seconds in this example,
it will take a few minutes to complete.
'''
if __name__ == '__main__':
example = StructralPlasticityExample()
# Prepare simulation
example.prepare_simulation()
example.create_nodes()
example.connect_external_input()
# Start simulation
example.simulate()
example.plot_data()
| gpl-2.0 |
nismod/energy_demand | energy_demand/scripts/weather_scripts/weather_scenario_generate_instance.py | 1 | 1850 | import os
import numpy as np
import pandas as pd
def create_annual_weather_file(
path,
path_out_stations,
year,
weather_scenario_name
):
"""Create annual weather file
write csv file
"""
# Read files
path_stations = os.path.join(path, "{}_stations.csv".format(year))
path_t_min = os.path.join(path, "{}_t_min.npy".format(year))
path_t_max = os.path.join(path, "{}_t_max.npy".format(year))
df_stations = pd.read_csv(path_stations)
stations = df_stations['station_id'].values.tolist()
t_min = np.load(path_t_min)
t_max = np.load(path_t_max)
out_csv = []
for station_cnt, station_id in enumerate(stations):
t_min_station = t_min[station_cnt]
t_max_station = t_max[station_cnt]
for yearday in range(365):
out_csv.append([year, station_id, weather_scenario_name, yearday, t_min_station[yearday], t_max_station[yearday]])
out_csv_array = np.array(out_csv)
return out_csv_array
def collect_multi_year_weather(path, path_out, year):
weather_scenario_name = os.path.join(path_out, "weather_temps_{}".format(year))
path_out_csv = os.path.join(weather_scenario_name + ".csv")
out_csv_array = create_annual_weather_file(
path,
path_out,
year=2015,
weather_scenario_name=weather_scenario_name)
columns = ['timestep', 'station_id', 'stiching_name', 'yearday', 't_min', 't_max',]
df = pd.DataFrame(out_csv_array, columns=columns)
df.to_csv(path_out_csv, index=False)
path = "X:/nismod/data/energy_demand/H-Met_office_weather_data/_complete_meteo_data_all_yrs_cleaned_min_max"
path_out = "X:/nismod/data/energy_demand/H-Met_office_weather_data/_complete_meteo_data_all_yrs_cleaned_min_max"
collect_multi_year_weather(
path,
path_out,
year=2015) | mit |
elkingtonmcb/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/delaunay/interpolate.py | 73 | 7068 | import numpy as np
from matplotlib._delaunay import compute_planes, linear_interpolate_grid, nn_interpolate_grid
from matplotlib._delaunay import nn_interpolate_unstructured
__all__ = ['LinearInterpolator', 'NNInterpolator']
def slice2gridspec(key):
"""Convert a 2-tuple of slices to start,stop,steps for x and y.
    key -- (slice(ystart,ystop,ystep), slice(xstart, xstop, xstep))
For now, the only accepted step values are imaginary integers (interpreted
in the same way numpy.mgrid, etc. do).
"""
if ((len(key) != 2) or
(not isinstance(key[0], slice)) or
(not isinstance(key[1], slice))):
raise ValueError("only 2-D slices, please")
x0 = key[1].start
x1 = key[1].stop
xstep = key[1].step
if not isinstance(xstep, complex) or int(xstep.real) != xstep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
xstep = int(xstep.imag)
y0 = key[0].start
y1 = key[0].stop
ystep = key[0].step
if not isinstance(ystep, complex) or int(ystep.real) != ystep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
ystep = int(ystep.imag)
return x0, x1, xstep, y0, y1, ystep
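# Illustrative example (added, not in the original module): a key written with
# numpy.mgrid's complex-step syntax unpacks as follows, with the second slice
# describing the x axis and the first one the y axis:
#
#     >>> slice2gridspec((slice(0, 1, 5j), slice(-2, 2, 9j)))
#     (-2, 2, 9, 0, 1, 5)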
class LinearInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
using the planes defined by the three function values at each corner of
the triangles.
LinearInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
    At the moment, only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Attributes:
planes -- (ntriangles, 3) array of floats specifying the plane for each
triangle.
Linear Interpolation
--------------------
    Given the Delaunay triangulation (or indeed *any* complete triangulation) we
can interpolate values inside the convex hull by locating the enclosing
triangle of the interpolation point and returning the value at that point of
the plane defined by the three node values.
f = planes[tri,0]*x + planes[tri,1]*y + planes[tri,2]
The interpolated function is C0 continuous across the convex hull of the
input points. It is C1 continuous across the convex hull except for the
nodes and the edges of the triangulation.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
self.planes = compute_planes(triangulation.x, triangulation.y, self.z,
triangulation.triangle_nodes)
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = linear_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.planes, self.triangulation.x, self.triangulation.y,
self.triangulation.triangle_nodes, self.triangulation.triangle_neighbors)
return grid
class NNInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
the natural neighbors method.
NNInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
    At the moment, only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Natural Neighbors Interpolation
-------------------------------
One feature of the Delaunay triangulation is that for each triangle, its
circumcircle contains no other point (although in degenerate cases, like
squares, other points may be *on* the circumcircle). One can also construct
what is called the Voronoi diagram from a Delaunay triangulation by
connecting the circumcenters of the triangles to those of their neighbors to
    form a tessellation of irregular polygons covering the plane and containing
only one node from the triangulation. Each point in one node's Voronoi
polygon is closer to that node than any other node.
To compute the Natural Neighbors interpolant, we consider adding the
interpolation point to the triangulation. We define the natural neighbors of
this point as the set of nodes participating in Delaunay triangles whose
circumcircles contain the point. To restore the Delaunay-ness of the
triangulation, one would only have to alter those triangles and Voronoi
    polygons. The new Voronoi diagram would have a polygon around the inserted
point. This polygon would "steal" area from the original Voronoi polygons.
For each node i in the natural neighbors set, we compute the area stolen
from its original Voronoi polygon, stolen[i]. We define the natural
neighbors coordinates
phi[i] = stolen[i] / sum(stolen,axis=0)
We then use these phi[i] to weight the corresponding function values from
the input data z to compute the interpolated value.
The interpolated surface is C1-continuous except at the nodes themselves
across the convex hull of the input points. One can find the set of points
that a given node will affect by computing the union of the areas covered by
the circumcircles of each Delaunay triangle that node participates in.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = nn_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
def __call__(self, intx, inty):
intz = nn_interpolate_unstructured(intx, inty, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return intz
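# Minimal usage sketch (added; it assumes the old matplotlib.delaunay.Triangulation
# class that shipped alongside this module -- adjust to whatever triangulation
# object you construct):
#
#     >>> import numpy as np
#     >>> from matplotlib.delaunay import Triangulation
#     >>> x, y = np.random.rand(2, 100)
#     >>> z = np.sin(x) + np.cos(y)
#     >>> tri = Triangulation(x, y)
#     >>> lin = LinearInterpolator(tri, z, default_value=np.nan)
#     >>> grid = lin[0:1:50j, 0:1:50j]                 # (50, 50) array
#     >>> nn = NNInterpolator(tri, z)
#     >>> vals = nn(np.array([0.5]), np.array([0.5]))  # unstructured query points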
| agpl-3.0 |
doc-E-brown/FacialLandmarkingReview | experiments/Sec4_ModelDefinition/menpoAAM.py | 1 | 2006 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# S.D.G
"""AAM test for MENPO dataset
:author: Ben Johnston
:license: 3-Clause BSD
"""
# Imports
import os
import menpo.io as mio
from aam import AAM
from menpofit.aam import HolisticAAM, PatchAAM
from sklearn.model_selection import train_test_split
MENPO_DATA_FOLDER = os.getenv('MENPO_DATA',
'~/datasets/ibug/menpo_2017_trainset')
class MenpoAAM(AAM):
""" Menpo AAM class """
def __init__(self, path_to_data=MENPO_DATA_FOLDER, model_type=HolisticAAM,
filename='menpo_aam.txt', verbose=True, profile=False):
super(MenpoAAM, self).__init__(
path_to_data, model_type, filename, verbose)
self.profile = profile
def load_data(self, crop_percentage=0.1,
test_set_ratio=0.3, max_images=None):
""" Load the images and landmarks in an menpo.io
format and crop the images using the specified
landmarks as a guide
Parameters
---------
"""
images = []
for i in mio.import_images(
self.filepath, max_images=max_images, verbose=self.verbose):
# Check if profile or frontal selected
# Frontal has 68 landmarks, profile 39
if self.profile and (i.landmarks['PTS'].lms.points.shape[0] == 68):
continue
elif not self.profile and \
(i.landmarks['PTS'].lms.points.shape[0] == 39):
continue
i = i.crop_to_landmarks_proportion(crop_percentage)
# Convert to grayscale if required
if i.n_channels == 3:
i = i.as_greyscale() # Default to luminosity
images.append(i)
# Split into training and test sets
if self.verbose:
print("%d images being used" % len(images))
self.train_set, self.test_set =\
train_test_split(images, test_size=test_set_ratio, random_state=42)
| gpl-3.0 |
kylerbrown/scikit-learn | sklearn/utils/metaestimators.py | 283 | 2353 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
| bsd-3-clause |
terkkila/scikit-learn | sklearn/lda.py | 56 | 17706 | """
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = sc.std_ * ledoit_wolf(X)[0] * sc.std_ # scale back
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
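# Illustrative note (added, not in the original module): the three accepted
# shrinkage settings correspond to
#
#     >>> X = np.random.randn(20, 5)
#     >>> _cov(X)          # or _cov(X, 'empirical'): empirical_covariance(X)
#     >>> _cov(X, 'auto')  # Ledoit-Wolf estimate, rescaled to the original feature scale
#     >>> _cov(X, 0.1)     # shrunk_covariance(empirical_covariance(X), 0.1)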
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
| bsd-3-clause |
davidgbe/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <[email protected]>
# Gilles Louppe <[email protected]>
# Andreas Mueller <[email protected]>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
cpitclaudel/dBoost | graphics/utils/__init__.py | 1 | 1422 | TANGO = {"yellow": ("#fce94f", "#edd400", "#c4a000"),
"orange": ("#fcaf3e", "#f57900", "#ce5c00"),
"brown": ("#e9b96e", "#c17d11", "#8f5902"),
"green": ("#8ae234", "#73d216", "#4e9a06"),
"blue": ("#729fcf", "#3465a4", "#204a87"),
"purple": ("#ad7fa8", "#75507b", "#5c3566"),
"red": ("#ef2929", "#cc0000", "#a40000"),
"grey": ("#eeeeec", "#d3d7cf", "#babdb6"),
"black": ("#888a85", "#555753", "#2e3436")}
import sys
import matplotlib as mpl
from matplotlib import pyplot
from os.path import dirname, join
def filename(default):
has_name = len(sys.argv) > 1
return (has_name, sys.argv[1] if has_name else default)
def save2pdf(pdf):
pyplot.tight_layout()
pyplot.savefig(pdf, format = 'pdf')
pyplot.clf()
def rcparams(fontsize = 9):
mpl.rcParams.update({
"font.size": fontsize,
"font.family": "serif",
"font.serif": "computer modern roman",
"axes.titlesize": "medium",
"xtick.labelsize": "small",
"ytick.labelsize": "small",
"legend.fontsize": "medium",
"text.usetex": True,
"text.latex.unicode": True,
"savefig.bbox": "tight",
"savefig.pad_inches": 0.05
})
def setup():
rcparams()
pyplot.gcf().set_size_inches(to_inches(200), to_inches(200)) # full column size is 240pt
def to_inches(points):
return points / 72.26999
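# e.g. to_inches(240) is about 3.32 inches -- the "full column size is 240pt"
# mentioned in setup() above (points here are TeX points, 72.27pt per inch).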
| gpl-3.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/missing.py | 2 | 19505 | """
Routines for filling missing data.
"""
import numpy as np
from pandas._libs import algos, lib
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.common import (
ensure_float64,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer,
is_integer_dtype,
is_numeric_v_string_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import isna
def mask_missing(arr, values_to_mask):
"""
    Return a masking array of the same size/shape as arr with entries
    set to True wherever the value equals any member of values_to_mask
"""
dtype, values_to_mask = infer_dtype_from_array(values_to_mask)
try:
values_to_mask = np.array(values_to_mask, dtype=dtype)
except Exception:
values_to_mask = np.array(values_to_mask, dtype=object)
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
mask = None
for x in nonna:
if mask is None:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask = False
else:
mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if is_scalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask |= False
else:
mask |= arr == x
if na_mask.any():
if mask is None:
mask = isna(arr)
else:
mask |= isna(arr)
# GH 21977
if mask is None:
mask = np.zeros(arr.shape, dtype=bool)
return mask
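# Illustrative example (added, not in the pandas source): both literal values
# and NaN entries of values_to_mask are matched elementwise, e.g.
#
#     >>> mask_missing(np.array([1.0, 2.0, 3.0, np.nan]), [2.0, np.nan])
#     array([False,  True, False,  True])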
def clean_fill_method(method, allow_nearest=False):
# asfreq is compat for resampling
if method in [None, "asfreq"]:
return None
if isinstance(method, str):
method = method.lower()
if method == "ffill":
method = "pad"
elif method == "bfill":
method = "backfill"
valid_methods = ["pad", "backfill"]
expecting = "pad (ffill) or backfill (bfill)"
if allow_nearest:
valid_methods.append("nearest")
expecting = "pad (ffill), backfill (bfill) or nearest"
if method not in valid_methods:
msg = "Invalid fill method. Expecting {expecting}. Got {method}".format(
expecting=expecting, method=method
)
raise ValueError(msg)
return method
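# Illustrative example (added, not in the pandas source): the fill aliases are
# normalised to the two canonical method names, and the resample-compat values
# map to None:
#
#     >>> clean_fill_method("ffill"), clean_fill_method("bfill")
#     ('pad', 'backfill')
#     >>> clean_fill_method("asfreq") is None
#     True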
def clean_interp_method(method, **kwargs):
order = kwargs.get("order")
valid = [
"linear",
"time",
"index",
"values",
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"barycentric",
"polynomial",
"krogh",
"piecewise_polynomial",
"pchip",
"akima",
"spline",
"from_derivatives",
]
if method in ("spline", "polynomial") and order is None:
raise ValueError("You must specify the order of the spline or " "polynomial.")
if method not in valid:
raise ValueError(
"method must be one of {valid}. Got '{method}' "
"instead.".format(valid=valid, method=method)
)
return method
def interpolate_1d(
xvalues,
yvalues,
method="linear",
limit=None,
limit_direction="forward",
limit_area=None,
fill_value=None,
bounds_error=False,
order=None,
**kwargs
):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
Bounds_error is currently hardcoded to False since non-scipy ones don't
take it as an argument.
"""
# Treat the original, non-scipy methods first.
invalid = isna(yvalues)
valid = ~invalid
if not valid.any():
# have to call np.asarray(xvalues) since xvalues could be an Index
# which can't be mutated
result = np.empty_like(np.asarray(xvalues), dtype=np.float64)
result.fill(np.nan)
return result
if valid.all():
return yvalues
if method == "time":
if not getattr(xvalues, "is_all_dates", None):
# if not issubclass(xvalues.dtype.type, np.datetime64):
raise ValueError(
"time-weighted interpolation only works "
"on Series or DataFrames with a "
"DatetimeIndex"
)
method = "values"
valid_limit_directions = ["forward", "backward", "both"]
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
msg = "Invalid limit_direction: expecting one of {valid!r}, " "got {invalid!r}."
raise ValueError(
msg.format(valid=valid_limit_directions, invalid=limit_direction)
)
if limit_area is not None:
valid_limit_areas = ["inside", "outside"]
limit_area = limit_area.lower()
if limit_area not in valid_limit_areas:
raise ValueError(
"Invalid limit_area: expecting one of {}, got "
"{}.".format(valid_limit_areas, limit_area)
)
# default limit is unlimited GH #16282
if limit is None:
# limit = len(xvalues)
pass
elif not is_integer(limit):
raise ValueError("Limit must be an integer")
elif limit < 1:
raise ValueError("Limit must be greater than 0")
from pandas import Series
ys = Series(yvalues)
# These are sets of index pointers to invalid values... i.e. {0, 1, etc...
all_nans = set(np.flatnonzero(invalid))
start_nans = set(range(ys.first_valid_index()))
end_nans = set(range(1 + ys.last_valid_index(), len(valid)))
mid_nans = all_nans - start_nans - end_nans
# Like the sets above, preserve_nans contains indices of invalid values,
# but in this case, it is the final set of indices that need to be
# preserved as NaN after the interpolation.
# For example if limit_direction='forward' then preserve_nans will
# contain indices of NaNs at the beginning of the series, and NaNs that
    # are more than 'limit' away from the prior non-NaN.
# set preserve_nans based on direction using _interp_limit
if limit_direction == "forward":
preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0))
elif limit_direction == "backward":
preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit))
else:
# both directions... just use _interp_limit
preserve_nans = set(_interp_limit(invalid, limit, limit))
# if limit_area is set, add either mid or outside indices
# to preserve_nans GH #16284
if limit_area == "inside":
# preserve NaNs on the outside
preserve_nans |= start_nans | end_nans
elif limit_area == "outside":
# preserve NaNs on the inside
preserve_nans |= mid_nans
    # sort preserve_nans and convert to list
preserve_nans = sorted(preserve_nans)
xvalues = getattr(xvalues, "values", xvalues)
yvalues = getattr(yvalues, "values", yvalues)
result = yvalues.copy()
if method in ["linear", "time", "index", "values"]:
if method in ("values", "index"):
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if needs_i8_conversion(inds.dtype.type):
inds = inds.view(np.int64)
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
else:
inds = xvalues
result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid])
result[preserve_nans] = np.nan
return result
sp_methods = [
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"barycentric",
"krogh",
"spline",
"polynomial",
"from_derivatives",
"piecewise_polynomial",
"pchip",
"akima",
]
if method in sp_methods:
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(np.int64)
result[invalid] = _interpolate_scipy_wrapper(
inds[valid],
yvalues[valid],
inds[invalid],
method=method,
fill_value=fill_value,
bounds_error=bounds_error,
order=order,
**kwargs
)
result[preserve_nans] = np.nan
return result
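# Illustrative sketch (added, not in the pandas source) of how limit and
# limit_direction interact when this function is reached via Series.interpolate:
# with values [NaN, 1, NaN, NaN, 4, NaN], method='linear', limit=1 and
# limit_direction='forward', the leading NaN and the second NaN of the interior
# run are kept as NaN (they end up in preserve_nans), while the first interior
# NaN and the trailing NaN are filled by np.interp above.
#
#     >>> import pandas as pd
#     >>> s = pd.Series([np.nan, 1.0, np.nan, np.nan, 4.0, np.nan])
#     >>> s.interpolate(limit=1, limit_direction="forward")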
def _interpolate_scipy_wrapper(
x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs
):
"""
Passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
    the list in clean_interp_method.
"""
extra = "{method} interpolation requires SciPy.".format(method=method)
import_optional_dependency("scipy", extra=extra)
from scipy import interpolate
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
"barycentric": interpolate.barycentric_interpolate,
"krogh": interpolate.krogh_interpolate,
"from_derivatives": _from_derivatives,
"piecewise_polynomial": _from_derivatives,
}
if getattr(x, "is_all_dates", False):
        # GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x._values.astype("i8"), new_x.astype("i8")
if method == "pchip":
try:
alt_methods["pchip"] = interpolate.pchip_interpolate
except AttributeError:
raise ImportError(
"Your version of Scipy does not support " "PCHIP interpolation."
)
elif method == "akima":
alt_methods["akima"] = _akima_interpolate
interp1d_methods = [
"nearest",
"zero",
"slinear",
"quadratic",
"cubic",
"polynomial",
]
if method in interp1d_methods:
if method == "polynomial":
method = order
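            # interp1d accepts an integer ``kind``, which it interprets as
            # the order of the spline interpolator to use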
terp = interpolate.interp1d(
x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error
)
new_y = terp(new_x)
elif method == "spline":
# GH #10633, #24014
if isna(order) or (order <= 0):
raise ValueError(
"order needs to be specified and greater than 0; "
"got order: {}".format(order)
)
terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x, **kwargs)
return new_y
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
"""
Convenience function for interpolate.BPoly.from_derivatives.
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
order: None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
        list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
        Whether to extrapolate to out-of-bounds points based on first and last
        intervals, or to return NaNs. Default: False.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
"""
from scipy import interpolate
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate)
return m(x)
def _akima_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
scipy.interpolate.Akima1DInterpolator
Returns
-------
y : scalar or array_like
        The result, of length R or length M or M by R.
"""
from scipy import interpolate
P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif interpolate._isscalar(der):
return P(x, der=der)
else:
return [P(x, nu) for nu in der]
def interpolate_2d(
values, method="pad", axis=0, limit=None, fill_value=None, dtype=None
):
"""
    Perform an actual interpolation of values; values will be made 2-d if
    needed, fills in place, and returns the result.
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
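    # operate along axis 0 internally; transpose on the way in and on the
    # way out when axis is not 0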
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with " "axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(transf(values), fill_value)
method = clean_fill_method(method)
if method == "pad":
values = transf(pad_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
else:
values = transf(
backfill_2d(transf(values), limit=limit, mask=mask, dtype=dtype)
)
# reshape back
if ndim == 1:
values = values[0]
return values
def _cast_values_for_fillna(values, dtype):
"""
Cast values to a dtype that algos.pad and algos.backfill can handle.
"""
# TODO: for int-dtypes we make a copy, but for everything else this
# alters the values in-place. Is this intentional?
if (
is_datetime64_dtype(dtype)
or is_datetime64tz_dtype(dtype)
or is_timedelta64_dtype(dtype)
):
values = values.view(np.int64)
elif is_integer_dtype(values):
# NB: this check needs to come after the datetime64 check above
values = ensure_float64(values)
return values
def _fillna_prep(values, mask=None, dtype=None):
# boilerplate for pad_1d, backfill_1d, pad_2d, backfill_2d
if dtype is None:
dtype = values.dtype
if mask is None:
# This needs to occur before datetime/timedeltas are cast to int64
mask = isna(values)
values = _cast_values_for_fillna(values, dtype)
mask = mask.view(np.uint8)
return values, mask
def pad_1d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
algos.pad_inplace(values, mask, limit=limit)
return values
def backfill_1d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
algos.backfill_inplace(values, mask, limit=limit)
return values
def pad_2d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
if np.all(values.shape):
algos.pad_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def backfill_2d(values, limit=None, mask=None, dtype=None):
values, mask = _fillna_prep(values, mask, dtype)
if np.all(values.shape):
algos.backfill_2d_inplace(values, mask, limit=limit)
else:
# for test coverage
pass
return values
_fill_methods = {"pad": pad_1d, "backfill": backfill_1d}
def get_fill_func(method):
method = clean_fill_method(method)
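    # e.g. "ffill" is normalized to "pad" here, so get_fill_func("ffill")
    # returns pad_1d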
return _fill_methods[method]
def clean_reindex_fill_method(method):
return clean_fill_method(method, allow_nearest=True)
def _interp_limit(invalid, fw_limit, bw_limit):
"""
Get indexers of values that won't be filled
because they exceed the limits.
Parameters
----------
invalid : boolean ndarray
fw_limit : int or None
forward limit to index
bw_limit : int or None
backward limit to index
Returns
-------
set of indexers
Notes
-----
This is equivalent to the more readable, but slower
.. code-block:: python
def _interp_limit(invalid, fw_limit, bw_limit):
for x in np.where(invalid)[0]:
if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
yield x
"""
# handle forward first; the backward direction is the same except
# 1. operate on the reversed array
# 2. subtract the returned indices from N - 1
N = len(invalid)
f_idx = set()
b_idx = set()
def inner(invalid, limit):
limit = min(limit, N)
windowed = _rolling_window(invalid, limit + 1).all(1)
idx = set(np.where(windowed)[0] + limit) | set(
np.where((~invalid[: limit + 1]).cumsum() == 0)[0]
)
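        # an index is flagged when it and the preceding `limit` positions are
        # all invalid, or when it sits in the leading run of invalid values
        # (within the first limit + 1 positions)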
return idx
if fw_limit is not None:
if fw_limit == 0:
f_idx = set(np.where(invalid)[0])
else:
f_idx = inner(invalid, fw_limit)
if bw_limit is not None:
if bw_limit == 0:
# then we don't even need to care about backwards
# just use forwards
return f_idx
else:
b_idx = list(inner(invalid[::-1], bw_limit))
b_idx = set(N - 1 - np.asarray(b_idx))
if fw_limit == 0:
return b_idx
return f_idx & b_idx
def _rolling_window(a, window):
"""
[True, True, False, True, False], 2 ->
[
[True, True],
[True, False],
[False, True],
[True, False],
]
"""
# https://stackoverflow.com/a/6811241
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
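    # stride trick: every row is a length-`window` view into `a`; no data is
    # copied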
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
| apache-2.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/feature_selection/tests/test_from_model.py | 5 | 6807 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(data)
clf.set_params(penalty="l1")
clf.fit(X, y)
X_new = assert_warns(
DeprecationWarning, clf.transform, X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, y)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == y), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
for threshold in ["gobbledigook", ".5 * gobbledigook"]:
model = SelectFromModel(clf, threshold=threshold)
model.fit(data, y)
assert_raises(ValueError, model.transform, data)
def test_input_estimator_unchanged():
"""
Test that SelectFromModel fits on a clone of the estimator.
"""
est = RandomForestClassifier()
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
assert_true(transformer.estimator is est)
@skip_if_32bit
def test_feature_importances():
X, y = datasets.make_classification(
n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False, random_state=0)
est = RandomForestClassifier(n_estimators=50, random_state=0)
for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
transformer = SelectFromModel(estimator=est, threshold=threshold)
transformer.fit(X, y)
assert_true(hasattr(transformer.estimator_, 'feature_importances_'))
X_new = transformer.transform(X)
assert_less(X_new.shape[1], X.shape[1])
importances = transformer.estimator_.feature_importances_
feature_mask = np.abs(importances) > func(importances)
assert_array_almost_equal(X_new, X[:, feature_mask])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
est = RandomForestClassifier(n_estimators=50, random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(X, y, sample_weight=sample_weight)
importances = transformer.estimator_.feature_importances_
transformer.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = transformer.estimator_.feature_importances_
assert_almost_equal(importances, importances_bis)
# For the Lasso and related models, the threshold defaults to 1e-5
transformer = SelectFromModel(estimator=Lasso(alpha=0.1))
transformer.fit(X, y)
X_new = transformer.transform(X)
mask = np.abs(transformer.estimator_.coef_) > 1e-5
assert_array_equal(X_new, X[:, mask])
def test_partial_fit():
est = PassiveAggressiveClassifier(random_state=0, shuffle=False)
transformer = SelectFromModel(estimator=est)
transformer.partial_fit(data, y,
classes=np.unique(y))
old_model = transformer.estimator_
transformer.partial_fit(data, y,
classes=np.unique(y))
new_model = transformer.estimator_
assert_true(old_model is new_model)
X_transform = transformer.transform(data)
transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
assert_array_equal(X_transform, transformer.transform(data))
def test_calling_fit_reinitializes():
est = LinearSVC(random_state=0)
transformer = SelectFromModel(estimator=est)
transformer.fit(data, y)
transformer.set_params(estimator__C=100)
transformer.fit(data, y)
assert_equal(transformer.estimator_.C, 100)
def test_prefit():
"""
Test all possible combinations of the prefit parameter.
"""
# Passing a prefit parameter with the selected model
    # and fitting an unfit model with prefit=False should give the same results.
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf)
model.fit(data, y)
X_transform = model.transform(data)
clf.fit(data, y)
model = SelectFromModel(clf, prefit=True)
assert_array_equal(model.transform(data), X_transform)
# Check that the model is rewritten if prefit=False and a fitted model is
# passed
model = SelectFromModel(clf, prefit=False)
model.fit(data, y)
assert_array_equal(model.transform(data), X_transform)
# Check that prefit=True and calling fit raises a ValueError
model = SelectFromModel(clf, prefit=True)
assert_raises(ValueError, model.fit, data, y)
def test_threshold_string():
est = RandomForestClassifier(n_estimators=50, random_state=0)
model = SelectFromModel(est, threshold="0.5*mean")
model.fit(data, y)
X_transform = model.transform(data)
# Calculate the threshold from the estimator directly.
est.fit(data, y)
threshold = 0.5 * np.mean(est.feature_importances_)
mask = est.feature_importances_ > threshold
assert_array_equal(X_transform, data[:, mask])
def test_threshold_without_refitting():
"""Test that the threshold can be set without refitting the model."""
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
model = SelectFromModel(clf, threshold=0.1)
model.fit(data, y)
X_transform = model.transform(data)
# Set a higher threshold to filter out more features.
model.threshold = 1.0
assert_greater(X_transform.shape[1], model.transform(data).shape[1])
| mit |
arjoly/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
h-mayorquin/M2_complexity_thesis | Analysis/receptive_field_graph_experiment_example.py | 1 | 2436 | import numpy as np
import cPickle
import matplotlib.pyplot as plt
import os
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
## Files
images_folder = './data/'
kernels_folder = './kernels/'
real = ''
stimuli_type_sparse = 'SparseNoise'
stimuli_type_dense = 'DenseNoise'
image_format = '.pickle'
kernel_format = '.npy'
remove_axis = True
save_figures = True
delay = 14
collapse_to = 5
#### Load the dense part
filename_h1kernel_dense = kernels_folder + stimuli_type_dense + 'real_regresion_h1' + kernel_format
filename_h2kernel_dense = kernels_folder + stimuli_type_dense + 'real_regresion_h2' + kernel_format
h1_dense = np.load(filename_h1kernel_dense)
h2_dense = np.load(filename_h2kernel_dense)
#### Plot the dense part
cdict1 = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
blue_red1 = LinearSegmentedColormap('BlueRed1', cdict1)
aux1 = np.min(h1_dense[delay,...])
aux2 = np.min(h2_dense[delay,...])
vmin = np.min([aux1, aux2])
aux1 = np.max(h1_dense[delay,...])
aux2 = np.max(h2_dense[delay,...])
vmax = np.max([aux1, aux2])
vmin = None
vmax = None
figure = plt.gcf()
ax = plt.gca()
plt.imshow(h1_dense[delay, ...], interpolation='bilinear', cmap=blue_red1, vmin=vmin, vmax=vmax)
if remove_axis:
figure.get_axes()[0].get_xaxis().set_visible(False)
figure.get_axes()[0].get_yaxis().set_visible(False)
folder = './figures/'
format = '.pdf'
title = 'example_h1'
save_filename = folder + title + format
figure.set_size_inches(16, 12)
plt.savefig(save_filename, dpi=100)
os.system("pdfcrop %s %s" % (save_filename, save_filename))
plt.show()
figure = plt.gcf()
ax = plt.gca()
im = ax.imshow(h2_dense[delay, ...], interpolation='bilinear', cmap=blue_red1, vmin=vmin, vmax=vmax)
if remove_axis:
figure.get_axes()[0].get_xaxis().set_visible(False)
figure.get_axes()[0].get_yaxis().set_visible(False)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="10%", pad=0.15)
plt.colorbar(im, cax=cax)
title = 'example_h2'
save_filename = folder + title + format
figure.set_size_inches(16, 12)
plt.savefig(save_filename, dpi = 100)
os.system("pdfcrop %s %s" % (save_filename, save_filename))
plt.show() | bsd-2-clause |
gfyoung/pandas | pandas/tests/tslibs/test_array_to_datetime.py | 1 | 6085 | from datetime import date, datetime
from dateutil.tz.tz import tzoffset
import numpy as np
import pytest
import pytz
from pandas._libs import iNaT, tslib
from pandas.compat import np_array_datetime64_compat
from pandas import Timestamp
import pandas._testing as tm
@pytest.mark.parametrize(
"data,expected",
[
(
["01-01-2013", "01-02-2013"],
[
"2013-01-01T00:00:00.000000000-0000",
"2013-01-02T00:00:00.000000000-0000",
],
),
(
["Mon Sep 16 2013", "Tue Sep 17 2013"],
[
"2013-09-16T00:00:00.000000000-0000",
"2013-09-17T00:00:00.000000000-0000",
],
),
],
)
def test_parsing_valid_dates(data, expected):
arr = np.array(data, dtype=object)
result, _ = tslib.array_to_datetime(arr)
expected = np_array_datetime64_compat(expected, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"dt_string, expected_tz",
[
["01-01-2013 08:00:00+08:00", 480],
["2013-01-01T08:00:00.000000000+0800", 480],
["2012-12-31T16:00:00.000000000-0800", -480],
["12-31-2012 23:00:00-01:00", -60],
],
)
def test_parsing_timezone_offsets(dt_string, expected_tz):
# All of these datetime strings with offsets are equivalent
# to the same datetime after the timezone offset is added.
arr = np.array(["01-01-2013 00:00:00"], dtype=object)
expected, _ = tslib.array_to_datetime(arr)
arr = np.array([dt_string], dtype=object)
result, result_tz = tslib.array_to_datetime(arr)
tm.assert_numpy_array_equal(result, expected)
assert result_tz is pytz.FixedOffset(expected_tz)
def test_parsing_non_iso_timezone_offset():
dt_string = "01-01-2013T00:00:00.000000000+0000"
arr = np.array([dt_string], dtype=object)
result, result_tz = tslib.array_to_datetime(arr)
expected = np.array([np.datetime64("2013-01-01 00:00:00.000000000")])
tm.assert_numpy_array_equal(result, expected)
assert result_tz is pytz.FixedOffset(0)
def test_parsing_different_timezone_offsets():
# see gh-17697
data = ["2015-11-18 15:30:00+05:30", "2015-11-18 15:30:00+06:30"]
data = np.array(data, dtype=object)
result, result_tz = tslib.array_to_datetime(data)
expected = np.array(
[
datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 19800)),
datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 23400)),
],
dtype=object,
)
tm.assert_numpy_array_equal(result, expected)
assert result_tz is None
@pytest.mark.parametrize(
"data", [["-352.737091", "183.575577"], ["1", "2", "3", "4", "5"]]
)
def test_number_looking_strings_not_into_datetime(data):
# see gh-4601
#
# These strings don't look like datetimes, so
# they shouldn't be attempted to be converted.
arr = np.array(data, dtype=object)
result, _ = tslib.array_to_datetime(arr, errors="ignore")
tm.assert_numpy_array_equal(result, arr)
@pytest.mark.parametrize(
"invalid_date",
[
date(1000, 1, 1),
datetime(1000, 1, 1),
"1000-01-01",
"Jan 1, 1000",
np.datetime64("1000-01-01"),
],
)
@pytest.mark.parametrize("errors", ["coerce", "raise"])
def test_coerce_outside_ns_bounds(invalid_date, errors):
arr = np.array([invalid_date], dtype="object")
kwargs = {"values": arr, "errors": errors}
if errors == "raise":
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(ValueError, match=msg):
tslib.array_to_datetime(**kwargs)
else: # coerce.
result, _ = tslib.array_to_datetime(**kwargs)
expected = np.array([iNaT], dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
def test_coerce_outside_ns_bounds_one_valid():
arr = np.array(["1/1/1000", "1/1/2000"], dtype=object)
result, _ = tslib.array_to_datetime(arr, errors="coerce")
expected = [iNaT, "2000-01-01T00:00:00.000000000-0000"]
expected = np_array_datetime64_compat(expected, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("errors", ["ignore", "coerce"])
def test_coerce_of_invalid_datetimes(errors):
arr = np.array(["01-01-2013", "not_a_date", "1"], dtype=object)
kwargs = {"values": arr, "errors": errors}
if errors == "ignore":
# Without coercing, the presence of any invalid
# dates prevents any values from being converted.
result, _ = tslib.array_to_datetime(**kwargs)
tm.assert_numpy_array_equal(result, arr)
else: # coerce.
# With coercing, the invalid dates becomes iNaT
result, _ = tslib.array_to_datetime(arr, errors="coerce")
expected = ["2013-01-01T00:00:00.000000000-0000", iNaT, iNaT]
tm.assert_numpy_array_equal(
result, np_array_datetime64_compat(expected, dtype="M8[ns]")
)
def test_to_datetime_barely_out_of_bounds():
# see gh-19382, gh-19529
#
# Close enough to bounds that dropping nanos
# would result in an in-bounds datetime.
arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object)
msg = "Out of bounds nanosecond timestamp: 2262-04-11 23:47:16"
with pytest.raises(tslib.OutOfBoundsDatetime, match=msg):
tslib.array_to_datetime(arr)
class SubDatetime(datetime):
pass
@pytest.mark.parametrize(
"data,expected",
[
([SubDatetime(2000, 1, 1)], ["2000-01-01T00:00:00.000000000-0000"]),
([datetime(2000, 1, 1)], ["2000-01-01T00:00:00.000000000-0000"]),
([Timestamp(2000, 1, 1)], ["2000-01-01T00:00:00.000000000-0000"]),
],
)
def test_datetime_subclass(data, expected):
# GH 25851
# ensure that subclassed datetime works with
# array_to_datetime
arr = np.array(data, dtype=object)
result, _ = tslib.array_to_datetime(arr)
expected = np_array_datetime64_compat(expected, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
cokelaer/spectrum | doc/sphinxext/sphinx_gallery/notebook.py | 9 | 3565 | # -*- coding: utf-8 -*-
r"""
============================
Parser for Jupyter notebooks
============================
Class that holds the Ipython notebook information
"""
# Author: Óscar Nájera
# License: 3-clause BSD
from __future__ import division, absolute_import, print_function
import json
import os
import re
import sys
def ipy_notebook_skeleton():
"""Returns a dictionary with the elements of a Jupyter notebook"""
py_version = sys.version_info
notebook_skeleton = {
"cells": [],
"metadata": {
"kernelspec": {
"display_name": "Python " + str(py_version[0]),
"language": "python",
"name": "python" + str(py_version[0])
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": py_version[0]
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython" + str(py_version[0]),
"version": '{0}.{1}.{2}'.format(*sys.version_info[:3])
}
},
"nbformat": 4,
"nbformat_minor": 0
}
return notebook_skeleton
def rst2md(text):
"""Converts the RST text from the examples docstrigs and comments
into markdown text for the IPython notebooks"""
top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
text = re.sub(top_heading, r'# \1', text)
math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^ .+)*)', flags=re.M)
text = re.sub(math_eq,
lambda match: r'$${0}$$'.format(match.group(1).strip()),
text)
inline_math = re.compile(r':math:`(.+)`')
text = re.sub(inline_math, r'$\1$', text)
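    # e.g. ':math:`x^2`' becomes '$x^2$', and a top-level RST title framed by
    # '=' lines becomes a '# ' markdown heading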
return text
class Notebook(object):
"""Ipython notebook object
Constructs the file cell-by-cell and writes it at the end"""
def __init__(self, file_name, target_dir):
"""Declare the skeleton of the notebook
Parameters
----------
file_name : str
original script file name, .py extension will be renamed
target_dir: str
directory where notebook file is to be saved
"""
self.file_name = file_name.replace('.py', '.ipynb')
self.write_file = os.path.join(target_dir, self.file_name)
self.work_notebook = ipy_notebook_skeleton()
self.add_code_cell("%matplotlib inline")
def add_code_cell(self, code):
"""Add a code cell to the notebook
Parameters
----------
code : str
Cell content
"""
code_cell = {
"cell_type": "code",
"execution_count": None,
"metadata": {"collapsed": False},
"outputs": [],
"source": [code.strip()]
}
self.work_notebook["cells"].append(code_cell)
def add_markdown_cell(self, text):
"""Add a markdown cell to the notebook
Parameters
----------
code : str
Cell content
"""
markdown_cell = {
"cell_type": "markdown",
"metadata": {},
"source": [rst2md(text)]
}
self.work_notebook["cells"].append(markdown_cell)
def save_file(self):
"""Saves the notebook to a file"""
with open(self.write_file, 'w') as out_nb:
json.dump(self.work_notebook, out_nb, indent=2)
| bsd-3-clause |
hammerlab/isovar | setup.py | 1 | 3085 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (absolute_import,)
import os
import logging
import re
from setuptools import setup, find_packages
readme_dir = os.path.dirname(__file__)
readme_path = os.path.join(readme_dir, 'README.md')
try:
with open(readme_path, 'r') as f:
readme_markdown = f.read()
except:
logging.warning("Failed to load %s" % readme_path)
readme_markdown = ""
with open('isovar/__init__.py', 'r') as f:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(),
re.MULTILINE).group(1)
if not version:
raise RuntimeError("Cannot find version information")
if __name__ == '__main__':
setup(
name='isovar',
version=version,
description="Determine mutant protein sequences from RN using assembly around variants",
author="Alex Rubinsteyn, Arman Aksoy, Julia Kodysh",
author_email="[email protected]",
url="https://github.com/openvax/isovar",
license="http://www.apache.org/licenses/LICENSE-2.0.html",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
install_requires=[
'six',
'pysam>=0.15.2',
'pandas',
'varcode>=0.9.0',
'pyensembl>=1.5.0',
'cached_property>=1.5.1',
'psutil',
],
long_description=readme_markdown,
long_description_content_type='text/markdown',
packages=find_packages(),
package_data={'isovar': ['logging.conf']},
entry_points={
'console_scripts': [
'isovar=isovar.cli.isovar_main:run',
'isovar-protein-sequences=isovar.cli.isovar_protein_sequences:run',
"isovar-translations=isovar.cli.isovar_translations:run",
"isovar-reference-contexts=isovar.cli.isovar_reference_contexts:run",
"isovar-allele-reads=isovar.cli.isovar_allele_reads:run",
"isovar-allele-counts=isovar.cli.isovar_allele_counts:run",
"isovar-variant-reads=isovar.cli.isovar_variant_reads:run",
"isovar-variant-sequences=isovar.cli.isovar_variant_sequences:run",
]
}
)
| apache-2.0 |
yhat/ggplot | ggplot/geoms/geom_bar.py | 1 | 4597 | from .geom import geom
import pandas as pd
import matplotlib.patches as patches
class geom_bar(geom):
"""
Bar chart
Parameters
----------
x:
x values for bins/categories
color:
color of the outer line
alpha:
transparency of fill
size:
thickness of outer line
linetype:
type of the outer line ('solid', 'dashed', 'dashdot', 'dotted')
fill:
color the interior of the bar will be
Examples
--------
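    Illustrative usage (a sketch; assumes the package's bundled ``diamonds``
    sample dataset):
    >>> from ggplot import ggplot, aes, geom_bar, diamonds
    >>> print(ggplot(aes(x='cut'), data=diamonds) + geom_bar())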
"""
DEFAULT_AES = {'alpha': None, 'color': None, 'fill': '#333333',
'linetype': 'solid', 'size': 1.0}
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {"width": 0.8}
_aes_renames = {'linetype': 'linestyle', 'size': 'linewidth',
'fill': 'color', 'color': 'edgecolor'}
def setup_data(self, data, _aes, facets=None):
(data, _aes) = self._update_data(data, _aes)
x_col = _aes['x']
weight_col = _aes.get('weight')
if not weight_col:
if '__weight__' not in data:
data.insert(0, '__weight__', 1)
weight_col = '__weight__'
else:
data['__weight__'] = data[weight_col]
weight_col = '__weight__'
fill_col = _aes.get('fill')
if fill_col:
fill_col = [fill_col]
else:
fill_col = []
groupers = [x_col]
if facets:
if facets.rowvar:
groupers.append(facets.rowvar)
if facets.colvar:
groupers.append(facets.colvar)
dfa = (data[groupers + fill_col + [weight_col]].groupby(groupers + fill_col).sum()).reset_index()
dfb = (data[groupers + [weight_col]].groupby(groupers).sum()).reset_index()
df = pd.merge(dfa, dfb, on=groupers)
df.rename(columns={'__weight___x': '__weight__', '__weight___y': '__total_weight__'}, inplace=True)
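        # after the merge, __weight__ holds the per-(x, fill) group sum and
        # __total_weight__ the per-x total used to normalize bar heights when
        # position == 'fill'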
if self.params.get('position')=='fill':
df['__calc_weight__'] = df['__weight__'] / df['__total_weight__']
else:
df['__calc_weight__'] = df['__weight__']
return df
def plot(self, ax, data, _aes, x_levels, fill_levels, lookups):
(data, _aes) = self._update_data(data, _aes)
variables = _aes.data
weight_col = _aes.get('weight')
x_levels = sorted(x_levels)
if not weight_col:
if '__weight__' not in data:
data.insert(0, '__weight__', 1.0)
weight_col = '__weight__'
params = self._get_plot_args(data, _aes)
if fill_levels is not None:
width = self.params["width"] / len(fill_levels)
else:
width = self.params["width"]
padding = width / 2
xticks = []
for i, x_level in enumerate(x_levels):
mask = data[variables['x']]==x_level
row = data[mask]
if len(row)==0:
xticks.append(i)
continue
if fill_levels is not None:
fillval = row[variables['fill']].iloc[0]
fill_idx = fill_levels.tolist().index(fillval)
fill_x_adjustment = width * len(fill_levels)/2.
else:
fill_x_adjustment = width / 2
if self.params.get('position') in ('stack', 'fill'):
dodge = 0.0
fill_x_adjustment = width / 2
if fill_levels is None:
height = 1.0
ypos = 0
else:
mask = (lookups[variables['x']]==x_level) & (lookups[variables['fill']]==fillval)
height = lookups[mask]['__calc_weight__'].sum()
mask = (lookups[variables['x']]==x_level) & (lookups[variables['fill']] < fillval)
ypos = lookups[mask]['__calc_weight__'].sum()
else:
if fill_levels is not None:
dodge = (width * fill_idx)
else:
dodge = width
ypos = 0.0
height = row[weight_col].sum()
xy = (dodge + i - fill_x_adjustment, ypos)
ax.add_patch(patches.Rectangle(xy, width, height, **params))
if fill_levels is not None:
xticks.append(i)
else:
xticks.append(i + dodge)
# need this b/c we're using patches
ax.autoscale_view()
# this will happen multiple times, but it's ok b/c it'll be the same each time
ax.set_xticks(xticks)
ax.set_xticklabels(x_levels)
| bsd-2-clause |
RedhawkSDR/integration-gnuhawk | gnuradio/gr-filter/examples/reconstruction.py | 12 | 4824 | #!/usr/bin/env python
#
# Copyright 2010,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital
from gnuradio import filter
import sys
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
fftlen = 8192
def main():
N = 10000
fs = 2000.0
Ts = 1.0/fs
t = scipy.arange(0, N*Ts, Ts)
# When playing with the number of channels, be careful about the filter
# specs and the channel map of the synthesizer set below.
nchans = 10
# Build the filter(s)
bw = 1000
tb = 400
proto_taps = filter.firdes.low_pass_2(1, nchans*fs,
bw, tb, 80,
filter.firdes.WIN_BLACKMAN_hARRIS)
print "Filter length: ", len(proto_taps)
# Create a modulated signal
npwr = 0.01
data = scipy.random.randint(0, 256, N)
rrc_taps = filter.firdes.root_raised_cosine(1, 2, 1, 0.35, 41)
src = gr.vector_source_b(data.astype(scipy.uint8).tolist(), False)
mod = digital.bpsk_mod(samples_per_symbol=2)
chan = gr.channel_model(npwr)
rrc = filter.fft_filter_ccc(1, rrc_taps)
# Split it up into pieces
channelizer = filter.pfb.channelizer_ccf(nchans, proto_taps, 2)
# Put the pieces back together again
syn_taps = [nchans*t for t in proto_taps]
synthesizer = filter.pfb_synthesizer_ccf(nchans, syn_taps, True)
src_snk = gr.vector_sink_c()
snk = gr.vector_sink_c()
# Remap the location of the channels
    # Can be done in synth or channelizer (watch out for rotations in
# the channelizer)
synthesizer.set_channel_map([ 0, 1, 2, 3, 4,
15, 16, 17, 18, 19])
tb = gr.top_block()
tb.connect(src, mod, chan, rrc, channelizer)
tb.connect(rrc, src_snk)
vsnk = []
for i in xrange(nchans):
tb.connect((channelizer,i), (synthesizer, i))
vsnk.append(gr.vector_sink_c())
tb.connect((channelizer,i), vsnk[i])
tb.connect(synthesizer, snk)
tb.run()
sin = scipy.array(src_snk.data()[1000:])
sout = scipy.array(snk.data()[1000:])
# Plot original signal
fs_in = nchans*fs
f1 = pylab.figure(1, figsize=(16,12), facecolor='w')
s11 = f1.add_subplot(2,2,1)
s11.psd(sin, NFFT=fftlen, Fs=fs_in)
s11.set_title("PSD of Original Signal")
s11.set_ylim([-200, -20])
s12 = f1.add_subplot(2,2,2)
s12.plot(sin.real[1000:1500], "o-b")
s12.plot(sin.imag[1000:1500], "o-r")
s12.set_title("Original Signal in Time")
start = 1
skip = 4
s13 = f1.add_subplot(2,2,3)
s13.plot(sin.real[start::skip], sin.imag[start::skip], "o")
s13.set_title("Constellation")
s13.set_xlim([-2, 2])
s13.set_ylim([-2, 2])
# Plot channels
nrows = int(scipy.sqrt(nchans))
ncols = int(scipy.ceil(float(nchans)/float(nrows)))
f2 = pylab.figure(2, figsize=(16,12), facecolor='w')
for n in xrange(nchans):
s = f2.add_subplot(nrows, ncols, n+1)
s.psd(vsnk[n].data(), NFFT=fftlen, Fs=fs_in)
s.set_title("Channel {0}".format(n))
s.set_ylim([-200, -20])
# Plot reconstructed signal
fs_out = 2*nchans*fs
f3 = pylab.figure(3, figsize=(16,12), facecolor='w')
s31 = f3.add_subplot(2,2,1)
s31.psd(sout, NFFT=fftlen, Fs=fs_out)
s31.set_title("PSD of Reconstructed Signal")
s31.set_ylim([-200, -20])
s32 = f3.add_subplot(2,2,2)
s32.plot(sout.real[1000:1500], "o-b")
s32.plot(sout.imag[1000:1500], "o-r")
s32.set_title("Reconstructed Signal in Time")
start = 2
skip = 4
s33 = f3.add_subplot(2,2,3)
s33.plot(sout.real[start::skip], sout.imag[start::skip], "o")
s33.set_title("Constellation")
s33.set_xlim([-2, 2])
s33.set_ylim([-2, 2])
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
hdmetor/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
TheMutley/openpilot | selfdrive/test/tests/plant/test_longitudinal.py | 1 | 10924 | #!/usr/bin/env python
import os
os.environ['OLD_CAN'] = '1'
os.environ['NOCRASH'] = '1'
import time
import unittest
import shutil
import matplotlib
matplotlib.use('svg')
from selfdrive.config import Conversions as CV
from selfdrive.car.honda.values import CruiseButtons as CB
from selfdrive.test.plant.maneuver import Maneuver
import selfdrive.manager as manager
from common.params import Params
def create_dir(path):
try:
os.makedirs(path)
except OSError:
pass
maneuvers = [
Maneuver(
'while cruising at 40 mph, change cruise speed to 50mph',
duration=30.,
initial_speed = 40. * CV.MPH_TO_MS,
cruise_button_presses = [(CB.DECEL_SET, 2.), (0, 2.3),
(CB.RES_ACCEL, 10.), (0, 10.1),
(CB.RES_ACCEL, 10.2), (0, 10.3)]
),
Maneuver(
'while cruising at 60 mph, change cruise speed to 50mph',
duration=30.,
initial_speed=60. * CV.MPH_TO_MS,
cruise_button_presses = [(CB.DECEL_SET, 2.), (0, 2.3),
(CB.DECEL_SET, 10.), (0, 10.1),
(CB.DECEL_SET, 10.2), (0, 10.3)]
),
Maneuver(
'while cruising at 20mph, grade change +10%',
duration=25.,
initial_speed=20. * CV.MPH_TO_MS,
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)],
grade_values = [0., 0., 1.0],
grade_breakpoints = [0., 10., 11.]
),
Maneuver(
'while cruising at 20mph, grade change -10%',
duration=25.,
initial_speed=20. * CV.MPH_TO_MS,
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)],
grade_values = [0., 0., -1.0],
grade_breakpoints = [0., 10., 11.]
),
Maneuver(
'approaching a 40mph car while cruising at 60mph from 100m away',
duration=30.,
initial_speed = 60. * CV.MPH_TO_MS,
lead_relevancy=True,
initial_distance_lead=100.,
speed_lead_values = [40.*CV.MPH_TO_MS, 40.*CV.MPH_TO_MS],
speed_lead_breakpoints = [0., 100.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)]
),
Maneuver(
'approaching a 0mph car while cruising at 40mph from 150m away',
duration=30.,
initial_speed = 40. * CV.MPH_TO_MS,
lead_relevancy=True,
initial_distance_lead=150.,
speed_lead_values = [0.*CV.MPH_TO_MS, 0.*CV.MPH_TO_MS],
speed_lead_breakpoints = [0., 100.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)]
),
Maneuver(
'steady state following a car at 20m/s, then lead decel to 0mph at 1m/s^2',
duration=50.,
initial_speed = 20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values = [20., 20., 0.],
speed_lead_breakpoints = [0., 15., 35.0],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)]
),
Maneuver(
'steady state following a car at 20m/s, then lead decel to 0mph at 2m/s^2',
duration=50.,
initial_speed = 20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values = [20., 20., 0.],
speed_lead_breakpoints = [0., 15., 25.0],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)]
),
Maneuver(
'steady state following a car at 20m/s, then lead decel to 0mph at 3m/s^2',
duration=50.,
initial_speed = 20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values = [20., 20., 0.],
speed_lead_breakpoints = [0., 15., 21.66],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3)]
),
Maneuver(
'starting at 0mph, approaching a stopped car 100m away',
duration=30.,
initial_speed = 0.,
lead_relevancy=True,
initial_distance_lead=100.,
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9)]
),
Maneuver(
"following a car at 60mph, lead accel and decel at 0.5m/s^2 every 2s",
duration=25.,
initial_speed=30.,
lead_relevancy=True,
initial_distance_lead=49.,
speed_lead_values=[30.,30.,29.,31.,29.,31.,29.],
speed_lead_breakpoints=[0., 6., 8., 12.,16.,20.,24.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)]
),
Maneuver(
"following a car at 10mph, stop and go at 1m/s2 lead dece1 and accel",
duration=70.,
initial_speed=10.,
lead_relevancy=True,
initial_distance_lead=20.,
speed_lead_values=[10., 0., 0., 10., 0.,10.],
speed_lead_breakpoints=[10., 20., 30., 40., 50., 60.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)]
),
Maneuver(
"green light: stopped behind lead car, lead car accelerates at 1.5 m/s",
duration=30.,
initial_speed=0.,
lead_relevancy=True,
initial_distance_lead=4.,
speed_lead_values=[0, 0 , 45],
speed_lead_breakpoints=[0, 10., 40.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9),
(CB.RES_ACCEL, 2.0), (0.0, 2.1),
(CB.RES_ACCEL, 2.2), (0.0, 2.3)]
),
Maneuver(
"stop and go with 1m/s2 lead decel and accel, with full stops",
duration=70.,
initial_speed=0.,
lead_relevancy=True,
initial_distance_lead=20.,
speed_lead_values=[10., 0., 0., 10., 0., 0.] ,
speed_lead_breakpoints=[10., 20., 30., 40., 50., 60.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)]
),
Maneuver(
"stop and go with 1.5m/s2 lead accel and 3.3m/s^2 lead decel, with full stops",
duration=45.,
initial_speed=0.,
lead_relevancy=True,
initial_distance_lead=20.,
speed_lead_values=[10., 0., 0., 10., 0., 0.] ,
speed_lead_breakpoints=[10., 13., 26., 33., 36., 45.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7)]
),
Maneuver(
"accelerate from 20 while lead vehicle decelerates from 40 to 20 at 1m/s2",
duration=30.,
initial_speed=10.,
lead_relevancy=True,
initial_distance_lead=10.,
speed_lead_values=[20., 10.],
speed_lead_breakpoints=[1., 11.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9),
(CB.RES_ACCEL, 2.0), (0.0, 2.1),
(CB.RES_ACCEL, 2.2), (0.0, 2.3)]
),
Maneuver(
"accelerate from 20 while lead vehicle decelerates from 40 to 0 at 2m/s2",
duration=30.,
initial_speed=10.,
lead_relevancy=True,
initial_distance_lead=10.,
speed_lead_values=[20., 0.],
speed_lead_breakpoints=[1., 11.],
cruise_button_presses = [(CB.DECEL_SET, 1.2), (0, 1.3),
(CB.RES_ACCEL, 1.4), (0.0, 1.5),
(CB.RES_ACCEL, 1.6), (0.0, 1.7),
(CB.RES_ACCEL, 1.8), (0.0, 1.9),
(CB.RES_ACCEL, 2.0), (0.0, 2.1),
(CB.RES_ACCEL, 2.2), (0.0, 2.3)]
),
Maneuver(
"fcw: traveling at 30 m/s and approaching lead traveling at 20m/s",
duration=15.,
initial_speed=30.,
lead_relevancy=True,
initial_distance_lead=100.,
speed_lead_values=[20.],
speed_lead_breakpoints=[1.],
cruise_button_presses = []
),
Maneuver(
"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 1m/s2",
duration=18.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 0.],
speed_lead_breakpoints=[3., 23.],
cruise_button_presses = []
),
Maneuver(
"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 3m/s2",
duration=13.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 0.],
speed_lead_breakpoints=[3., 9.6],
cruise_button_presses = []
),
Maneuver(
"fcw: traveling at 20 m/s following a lead that decels from 20m/s to 0 at 5m/s2",
duration=8.,
initial_speed=20.,
lead_relevancy=True,
initial_distance_lead=35.,
speed_lead_values=[20., 0.],
speed_lead_breakpoints=[3., 7.],
cruise_button_presses = []
)
]
#maneuvers = [maneuvers[-1]]
def setup_output():
output_dir = os.path.join(os.getcwd(), 'out/longitudinal')
if not os.path.exists(os.path.join(output_dir, "index.html")):
# write test output header
css_style = """
.maneuver_title {
font-size: 24px;
text-align: center;
}
.maneuver_graph {
width: 100%;
}
"""
view_html = "<html><head><style>%s</style></head><body><table>" % (css_style,)
for i, man in enumerate(maneuvers):
view_html += "<tr><td class='maneuver_title' colspan=5><div>%s</div></td></tr><tr>" % (man.title,)
for c in ['distance.svg', 'speeds.svg', 'acceleration.svg', 'pedals.svg', 'pid.svg']:
view_html += "<td><img class='maneuver_graph' src='%s'/></td>" % (os.path.join("maneuver" + str(i+1).zfill(2), c), )
view_html += "</tr>"
create_dir(output_dir)
with open(os.path.join(output_dir, "index.html"), "w") as f:
f.write(view_html)
class LongitudinalControl(unittest.TestCase):
@classmethod
def setUpClass(cls):
setup_output()
shutil.rmtree('/data/params', ignore_errors=True)
params = Params()
params.put("Passive", "1" if os.getenv("PASSIVE") else "0")
params.put("IsFcwEnabled", "1")
manager.gctx = {}
manager.prepare_managed_process('radard')
manager.prepare_managed_process('controlsd')
manager.start_managed_process('radard')
manager.start_managed_process('controlsd')
@classmethod
def tearDownClass(cls):
manager.kill_managed_process('radard')
manager.kill_managed_process('controlsd')
time.sleep(5)
# hack
def test_longitudinal_setup(self):
pass
WORKERS = 8
def run_maneuver_worker(k):
output_dir = os.path.join(os.getcwd(), 'out/longitudinal')
for i, man in enumerate(maneuvers[k::WORKERS]):
score, plot = man.evaluate()
plot.write_plot(output_dir, "maneuver" + str(WORKERS * i + k+1).zfill(2))
for k in xrange(WORKERS):
setattr(LongitudinalControl,
"test_longitudinal_maneuvers_%d" % (k+1),
lambda self, k=k: run_maneuver_worker(k))
if __name__ == "__main__":
unittest.main()
| mit |
rrohan/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is the different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
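    # grid-search the shrinkage, then report the mean cross-validated score
    # (log-likelihood) of the best refit estimator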
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
some-0n3/imgcls_utils | plotting.py | 1 | 8197 | """Plotting utilities.
This module contains several utilities for plotting using the
:mod:`pylab` and :mod:`matplotlib` packages. This includes
functions/iterators to create nested subplots and a normalizer
to create heatmaps.
The following code
>>> outer = hsplit(2, 1)
>>> next(outer)
>>> plot_img()
>>> inner = vsplit(1, 1,parent=next(outer))
>>> next(inner)
>>> plot_img()
>>> next(inner)
>>> plot_img()
will create a plot with the following layout
::
+-------+
| * * |
| * * |
+---+---+
| * | * |
+---+---+
This example will create a 10x10 plot with images
>>> for image, _ in square_griter(images[:100]):
>>> plot_img(image)
"""
from collections import namedtuple
from itertools import product
from math import ceil
import numpy
from matplotlib import pylab
from matplotlib.colors import Normalize
# from http://matplotlib.org/users/colormapnorms.html
class MidpointNormalize(Normalize):
"""Normalize the negative and positive data differently.
This class is a normalizer for plotting, usually passed as ``norm``
parameter to the ``plot`` function. The code was taken from the
`matplotlib's User’s Guide
<http://matplotlib.org/users/colormapnorms.html#custom-normalization-two-linear-ranges>`_
It is mostly used for heatmaps.
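    Illustrative usage (``data`` stands in for any 2-D array)::
        pylab.imshow(data, cmap='seismic',
                     norm=MidpointNormalize(vmin=-5., vmax=10., midpoint=0.))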
"""
def __init__(self, vmin=None, vmax=None, midpoint=0.0, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return numpy.ma.masked_array(numpy.interp(value, x, y))
Parent = namedtuple('Parent', ['rows', 'columns', 'row_pos', 'col_pos',
'width', 'height', 'axis'])
def iter_grid(rows, columns, parent=None):
"""Iterate over (nested) subplots.
The iterator will call ``pylab``'s subplot function and yield a
object to create nested subplots. If such a object is passed as
parent parameter, the iterator will create subplots in it's
parent-plots.
Parameters
----------
rows : integer
The number of rows in the grid.
columns : integer
The number of columns in the grid-
parent : a :class:`Parent` instance or ``None`` (``None``)
If not ``None``, then this is an object to generate
nested subplots.
Yields
------
a :class:`Parent` instance
An object containing information for generating subplot via this
module's functions. It also has an attribute called ``axis``
that contains the result of the subplot function.
"""
if parent is None:
shape = rows, columns
width, height = 1, 1
off_row, off_col = 0, 0
else:
shape = parent.rows * rows, parent.columns * columns
width, height = parent.width, parent.height
off_row, off_col = parent.row_pos * rows, parent.col_pos * columns
for i_row, i_col in product(range(rows), range(columns)):
row_pos = off_row + i_row * width
col_pos = off_col + i_col * height
axis = pylab.subplot2grid(shape, loc=(row_pos, col_pos))
yield Parent(*shape, row_pos, col_pos, width, height, axis)
def square_griter(iterable, size=None, parent=None):
"""Try to make a quadratic grid for an iterator.
Parameters
----------
iterable : iterable
An iterable object to build a grid for.
size : tuple (pair) of integers or ``None`` (``None``)
The size of the grid.
parent : a :class:`Parent` instance or ``None`` (``None``)
If not ``None``, then this is an object to generate
nested subplots.
Yields
------
an object
An object from the iterable.
a :class:`Parent` instance
An object containing information for generating subplot via this
module's functions. It also has an attribute called ``axis``
that contains the result of the subplot function.
"""
if size is None:
iterable = tuple(iterable)
length = len(iterable)
root = ceil(numpy.sqrt(length))
grid = iter_grid(root, ceil(length / root), parent=parent)
else:
grid = iter_grid(*size, parent=parent)
yield from zip(iterable, grid)
def product_griter(row_iter, col_iter, parent=None):
"""Iterate over subplots, span by two iterables.
Parameters
----------
row_iter : iterable
An iterable yielding an object for every row.
col_iter : iterable
An iterable yielding an object for every column.
parent : a :class:`Parent` instance or ``None`` (``None``)
If not ``None``, then this is an object to generate
nested subplots.
Yields
------
an object
An object from the row-iterable.
an object
An object from the column-iterable.
a :class:`Parent` instance
An object containing information for generating subplot via this
module's functions. It also has an attribute called ``axis``
that contains the result of the subplot function.
"""
rows = tuple(row_iter)
columns = tuple(col_iter)
griter = iter_grid(len(rows), len(columns), parent=parent)
for grid, (row, colum) in zip(griter, product(rows, columns)):
yield row, colum, grid
def vsplit(*splits, parent=None):
"""Vertically split a plot with different sizes.
Parameters
----------
splits : number of integers
Each number describes the width of the column.
The iterator ``vsplit(2, 1)`` will yield two subplots, where
the first is left to the second one and is twice as big.
parent : a :class:`Parent` instance or ``None`` (``None``)
If not ``None``, then this is an object to generate
nested subplots.
Yields
------
a :class:`Parent` instance
An object containing information for generating subplot via this
module's functions. It also has an attribute called ``axis``
that contains the result of the subplot function.
"""
columns = sum(splits)
if parent is None:
shape = 1, columns
width, height = 1, 1
off_row, off_col = 0, 0
else:
shape = parent.rows, parent.columns * columns
width, height = parent.width, parent.height
off_row, off_col = parent.row_pos, parent.col_pos * columns
location = 0
for split in (s * width for s in splits):
col_pos = off_col + location
axis = pylab.subplot2grid(shape, loc=(off_row, col_pos),
colspan=split, rowspan=height)
yield Parent(*shape, off_row, col_pos, split, height, axis)
location += split
def hsplit(*splits, parent=None):
"""Horizontally split a plot with different sizes.
Parameters
----------
splits : one or more integers
Each number describes the relative height of its row.
The iterator ``hsplit(2, 1)`` will yield two subplots, where
the first is above the second and twice as tall.
parent : a :class:`Parent` instance or ``None`` (``None``)
If not ``None``, then this is an object to generate
nested subplots.
Yields
------
a :class:`Parent` instance
An object containing information for generating subplots via this
module's functions. It also has an attribute called ``axis``
that contains the result of the subplot function.
"""
rows = sum(splits)
if parent is None:
shape = rows, 1
width, height = 1, 1
off_row, off_col = 0, 0
else:
shape = parent.rows * rows, parent.columns
width, height = parent.width, parent.height
off_row, off_col = parent.row_pos * rows, parent.col_pos
location = 0
for split in (s * height for s in splits):
row_pos = off_row + location
axis = pylab.subplot2grid(shape, loc=(row_pos, off_col),
colspan=width, rowspan=split)
yield Parent(*shape, row_pos, off_col, width, split, axis)
location += split
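# Example (a sketch, not part of the original module): build a 1x2 outer grid
# and nest a 2:1 vertical split inside its first cell.  Assumes ``pylab`` above
# is matplotlib's pylab interface and that a figure is already open.
#
#     outer = list(iter_grid(1, 2))
#     outer[0].axis.set_axis_off()        # hide the placeholder parent axis
#     wide, narrow = vsplit(2, 1, parent=outer[0])
#     wide.axis.set_title('2/3 of the first cell')
#     narrow.axis.set_title('1/3 of the first cell')
#     outer[1].axis.plot([0, 1], [1, 0])
#     pylab.show()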
| mit |
datapythonista/pandas | pandas/tests/io/test_spss.py | 7 | 2745 | from pathlib import Path
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
pyreadstat = pytest.importorskip("pyreadstat")
@pytest.mark.parametrize("path_klass", [lambda p: p, Path])
def test_spss_labelled_num(path_klass, datapath):
# test file from the Haven project (https://haven.tidyverse.org/)
fname = path_klass(datapath("io", "data", "spss", "labelled-num.sav"))
df = pd.read_spss(fname, convert_categoricals=True)
expected = pd.DataFrame({"VAR00002": "This is one"}, index=[0])
expected["VAR00002"] = pd.Categorical(expected["VAR00002"])
tm.assert_frame_equal(df, expected)
df = pd.read_spss(fname, convert_categoricals=False)
expected = pd.DataFrame({"VAR00002": 1.0}, index=[0])
tm.assert_frame_equal(df, expected)
def test_spss_labelled_num_na(datapath):
# test file from the Haven project (https://haven.tidyverse.org/)
fname = datapath("io", "data", "spss", "labelled-num-na.sav")
df = pd.read_spss(fname, convert_categoricals=True)
expected = pd.DataFrame({"VAR00002": ["This is one", None]})
expected["VAR00002"] = pd.Categorical(expected["VAR00002"])
tm.assert_frame_equal(df, expected)
df = pd.read_spss(fname, convert_categoricals=False)
expected = pd.DataFrame({"VAR00002": [1.0, np.nan]})
tm.assert_frame_equal(df, expected)
def test_spss_labelled_str(datapath):
# test file from the Haven project (https://haven.tidyverse.org/)
fname = datapath("io", "data", "spss", "labelled-str.sav")
df = pd.read_spss(fname, convert_categoricals=True)
expected = pd.DataFrame({"gender": ["Male", "Female"]})
expected["gender"] = pd.Categorical(expected["gender"])
tm.assert_frame_equal(df, expected)
df = pd.read_spss(fname, convert_categoricals=False)
expected = pd.DataFrame({"gender": ["M", "F"]})
tm.assert_frame_equal(df, expected)
def test_spss_umlauts(datapath):
# test file from the Haven project (https://haven.tidyverse.org/)
fname = datapath("io", "data", "spss", "umlauts.sav")
df = pd.read_spss(fname, convert_categoricals=True)
expected = pd.DataFrame(
{"var1": ["the ä umlaut", "the ü umlaut", "the ä umlaut", "the ö umlaut"]}
)
expected["var1"] = pd.Categorical(expected["var1"])
tm.assert_frame_equal(df, expected)
df = pd.read_spss(fname, convert_categoricals=False)
expected = pd.DataFrame({"var1": [1.0, 2.0, 1.0, 3.0]})
tm.assert_frame_equal(df, expected)
def test_spss_usecols(datapath):
# usecols must be list-like
fname = datapath("io", "data", "spss", "labelled-num.sav")
with pytest.raises(TypeError, match="usecols must be list-like."):
pd.read_spss(fname, usecols="VAR00002")
| bsd-3-clause |
great-expectations/great_expectations | tests/profile/test_profile.py | 1 | 14878 | import os
import pytest
import great_expectations.exceptions as ge_exceptions
from great_expectations.dataset.pandas_dataset import PandasDataset
from great_expectations.datasource import PandasDatasource
from great_expectations.profile.base import DatasetProfiler, Profiler
from great_expectations.profile.basic_dataset_profiler import BasicDatasetProfiler
from great_expectations.profile.columns_exist import ColumnsExistProfiler
def test_base_class_not_instantiable_due_to_abstract_methods():
with pytest.raises(TypeError):
Profiler()
def test_DataSetProfiler_methods():
toy_dataset = PandasDataset({"x": [1, 2, 3]})
assert DatasetProfiler.validate(1) == False
assert DatasetProfiler.validate(toy_dataset)
with pytest.raises(NotImplementedError):
DatasetProfiler.profile(toy_dataset)
def test_ColumnsExistProfiler():
toy_dataset = PandasDataset({"x": [1, 2, 3]})
expectations_config, evr_config = ColumnsExistProfiler.profile(toy_dataset)
assert len(expectations_config.expectations) == 1
assert (
expectations_config.expectations[0].expectation_type == "expect_column_to_exist"
)
assert expectations_config.expectations[0].kwargs["column"] == "x"
def test_BasicDatasetProfiler():
toy_dataset = PandasDataset(
{"x": [1, 2, 3]},
)
assert (
len(toy_dataset.get_expectation_suite(suppress_warnings=True).expectations) == 0
)
expectations_config, evr_config = BasicDatasetProfiler.profile(toy_dataset)
assert (
len(toy_dataset.get_expectation_suite(suppress_warnings=True).expectations) > 0
)
assert "BasicDatasetProfiler" in expectations_config.meta
assert set(expectations_config.meta["BasicDatasetProfiler"].keys()) == {
"created_by",
"created_at",
"batch_kwargs",
}
assert "notes" in expectations_config.meta
assert set(expectations_config.meta["notes"].keys()) == {"format", "content"}
assert "To add additional notes" in expectations_config.meta["notes"]["content"][0]
added_expectations = set()
for exp in expectations_config.expectations:
added_expectations.add(exp.expectation_type)
assert "BasicDatasetProfiler" in exp.meta
assert "confidence" in exp.meta["BasicDatasetProfiler"]
expected_expectations = {
"expect_table_row_count_to_be_between",
"expect_table_columns_to_match_ordered_list",
"expect_column_values_to_be_in_set",
"expect_column_unique_value_count_to_be_between",
"expect_column_proportion_of_unique_values_to_be_between",
"expect_column_values_to_not_be_null",
"expect_column_values_to_be_in_type_list",
"expect_column_values_to_be_unique",
}
assert expected_expectations.issubset(added_expectations)
def test_BasicDatasetProfiler_null_column():
"""
The profiler should determine that null columns are of null cardinality and of null type, and
should not generate expectations specific to types and cardinality categories.
We verify this by running the basic profiler on a Pandas dataset with an empty column
and asserting the number of successful results for the empty column.
"""
toy_dataset = PandasDataset({"x": [1, 2, 3], "y": [None, None, None]})
assert (
len(toy_dataset.get_expectation_suite(suppress_warnings=True).expectations) == 0
)
expectations_config, evr_config = BasicDatasetProfiler.profile(toy_dataset)
# TODO: assert set - specific expectations
assert (
len(
[
result
for result in evr_config["results"]
if result.expectation_config["kwargs"].get("column") == "y"
and result.success
]
)
== 4
)
assert len(
[
result
for result in evr_config["results"]
if result.expectation_config["kwargs"].get("column") == "y"
and result.success
]
) < len(
[
result
for result in evr_config["results"]
if result.expectation_config["kwargs"].get("column") == "x"
and result.success
]
)
def test_BasicDatasetProfiler_partially_null_column(dataset):
"""
Unit test to check the expectations that BasicDatasetProfiler creates for a partially null column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture.
"nulls" is the partially null column in the fixture dataset
"""
expectations_config, evr_config = BasicDatasetProfiler.profile(dataset)
assert {
"expect_column_to_exist",
"expect_column_values_to_be_in_type_list",
"expect_column_unique_value_count_to_be_between",
"expect_column_proportion_of_unique_values_to_be_between",
"expect_column_values_to_not_be_null",
"expect_column_values_to_be_in_set",
"expect_column_values_to_be_unique",
} == {
expectation.expectation_type
for expectation in expectations_config.expectations
if expectation.kwargs.get("column") == "nulls"
}
def test_BasicDatasetProfiler_non_numeric_low_cardinality(non_numeric_low_card_dataset):
"""
Unit test to check the expectations that BasicDatasetProfiler creates for a low cardinality
non numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture.
"""
expectations_config, evr_config = BasicDatasetProfiler.profile(
non_numeric_low_card_dataset
)
assert {
"expect_column_to_exist",
"expect_column_values_to_be_in_type_list",
"expect_column_unique_value_count_to_be_between",
"expect_column_distinct_values_to_be_in_set",
"expect_column_proportion_of_unique_values_to_be_between",
"expect_column_values_to_not_be_null",
"expect_column_values_to_be_in_set",
"expect_column_values_to_not_match_regex",
} == {
expectation.expectation_type
for expectation in expectations_config.expectations
if expectation.kwargs.get("column") == "lowcardnonnum"
}
def test_BasicDatasetProfiler_non_numeric_high_cardinality(
non_numeric_high_card_dataset,
):
"""
Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality
non numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture.
"""
expectations_config, evr_config = BasicDatasetProfiler.profile(
non_numeric_high_card_dataset
)
assert {
"expect_column_to_exist",
"expect_column_values_to_be_in_type_list",
"expect_column_unique_value_count_to_be_between",
"expect_column_proportion_of_unique_values_to_be_between",
"expect_column_values_to_not_be_null",
"expect_column_values_to_be_in_set",
"expect_column_values_to_not_match_regex",
} == {
expectation.expectation_type
for expectation in expectations_config.expectations
if expectation.kwargs.get("column") == "highcardnonnum"
}
def test_BasicDatasetProfiler_numeric_high_cardinality(numeric_high_card_dataset):
"""
Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality
numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture.
"""
expectations_config, evr_config = BasicDatasetProfiler.profile(
numeric_high_card_dataset
)
assert {
"expect_column_to_exist",
"expect_table_row_count_to_be_between",
"expect_table_columns_to_match_ordered_list",
"expect_column_values_to_be_in_type_list",
"expect_column_unique_value_count_to_be_between",
"expect_column_proportion_of_unique_values_to_be_between",
"expect_column_values_to_not_be_null",
"expect_column_values_to_be_in_set",
"expect_column_values_to_be_unique",
} == {
expectation.expectation_type for expectation in expectations_config.expectations
}
def test_BasicDatasetProfiler_with_context(filesystem_csv_data_context):
context = filesystem_csv_data_context
context.create_expectation_suite("default")
datasource = context.datasources["rad_datasource"]
base_dir = datasource.config["batch_kwargs_generators"]["subdir_reader"][
"base_directory"
]
batch_kwargs = {
"datasource": "rad_datasource",
"path": os.path.join(base_dir, "f1.csv"),
}
batch = context.get_batch(batch_kwargs, "default")
expectation_suite, validation_results = BasicDatasetProfiler.profile(batch)
assert expectation_suite.expectation_suite_name == "default"
assert "BasicDatasetProfiler" in expectation_suite.meta
assert set(expectation_suite.meta["BasicDatasetProfiler"].keys()) == {
"created_by",
"created_at",
"batch_kwargs",
}
assert (
expectation_suite.meta["BasicDatasetProfiler"]["batch_kwargs"] == batch_kwargs
)
for exp in expectation_suite.expectations:
assert "BasicDatasetProfiler" in exp.meta
assert "confidence" in exp.meta["BasicDatasetProfiler"]
assert set(validation_results.meta.keys()) == {
"batch_kwargs",
"batch_markers",
"batch_parameters",
"expectation_suite_name",
"great_expectations_version",
"run_id",
"validation_time",
}
def test_context_profiler(filesystem_csv_data_context):
"""
This just validates that it's possible to profile using the datasource hook,
and have validation results available in the DataContext
"""
context = filesystem_csv_data_context
assert isinstance(context.datasources["rad_datasource"], PandasDatasource)
assert context.list_expectation_suites() == []
context.profile_datasource("rad_datasource", profiler=BasicDatasetProfiler)
assert len(context.list_expectation_suites()) == 1
expected_suite_name = "rad_datasource.subdir_reader.f1.BasicDatasetProfiler"
profiled_expectations = context.get_expectation_suite(expected_suite_name)
for exp in profiled_expectations.expectations:
assert "BasicDatasetProfiler" in exp.meta
assert "confidence" in exp.meta["BasicDatasetProfiler"]
assert profiled_expectations.expectation_suite_name == expected_suite_name
assert "batch_kwargs" in profiled_expectations.meta["BasicDatasetProfiler"]
assert len(profiled_expectations.expectations) == 8
def test_context_profiler_with_data_asset_name(filesystem_csv_data_context):
"""
If a valid data asset name is passed to the profiling method
in the data_assets argument, the profiling method profiles only this data asset
"""
context = filesystem_csv_data_context
assert isinstance(context.datasources["rad_datasource"], PandasDatasource)
assert context.list_expectation_suites() == []
profiling_result = context.profile_datasource(
"rad_datasource", data_assets=["f1"], profiler=BasicDatasetProfiler
)
assert profiling_result["success"] == True
assert len(profiling_result["results"]) == 1
assert (
profiling_result["results"][0][0].expectation_suite_name
== "rad_datasource.subdir_reader.f1.BasicDatasetProfiler"
)
def test_context_profiler_with_nonexisting_data_asset_name(filesystem_csv_data_context):
"""
If a non-existing data asset name is passed to the profiling method
in the data_assets argument, the profiling method must return an error
code in the result and the names of the unrecognized assets
"""
context = filesystem_csv_data_context
assert isinstance(context.datasources["rad_datasource"], PandasDatasource)
assert context.list_expectation_suites() == []
profiling_result = context.profile_datasource(
"rad_datasource",
data_assets=["this_asset_doesnot_exist"],
profiler=BasicDatasetProfiler,
)
assert profiling_result == {
"success": False,
"error": {
"code": 3,
"not_found_data_assets": ["this_asset_doesnot_exist"],
"data_assets": [("f1", "file")],
},
}
def test_context_profiler_with_non_existing_generator(filesystem_csv_data_context):
"""
If a non-existing generator name is passed to the profiling method
in the generator_name argument, the profiling method must raise an exception.
"""
context = filesystem_csv_data_context
assert isinstance(context.datasources["rad_datasource"], PandasDatasource)
assert context.list_expectation_suites() == []
with pytest.raises(ge_exceptions.ProfilerError):
profiling_result = context.profile_datasource(
"rad_datasource",
data_assets=["this_asset_doesnot_exist"],
profiler=BasicDatasetProfiler,
batch_kwargs_generator_name="this_gen_does_not_exist",
)
def test_context_profiler_without_generator_name_arg_on_datasource_with_multiple_generators(
filesystem_csv_data_context, filesystem_csv_2
):
"""
If no generator_name is passed to the profiling method and the datasource has more than one
generator configured, the profiling method must return an error code in the result
"""
context = filesystem_csv_data_context
context.add_batch_kwargs_generator(
"rad_datasource",
"second_generator",
"SubdirReaderBatchKwargsGenerator",
**{
"base_directory": str(filesystem_csv_2),
}
)
assert isinstance(context.datasources["rad_datasource"], PandasDatasource)
profiling_result = context.profile_datasource(
"rad_datasource",
data_assets=["this_asset_doesnot_exist"],
profiler=BasicDatasetProfiler,
)
assert profiling_result == {"success": False, "error": {"code": 5}}
def test_context_profiler_without_generator_name_arg_on_datasource_with_no_generators(
filesystem_csv_data_context,
):
"""
If no generator_name is passed to the profiling method and the datasource has no
generators configured, the profiling method must return an error code in the result
"""
context = filesystem_csv_data_context
context.add_datasource(
"datasource_without_generators",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
)
assert isinstance(
context.datasources["datasource_without_generators"], PandasDatasource
)
profiling_result = context.profile_datasource(
"datasource_without_generators", profiler=BasicDatasetProfiler
)
assert profiling_result == {"success": False, "error": {"code": 4}}
| apache-2.0 |
hacpai/show-me-the-code | Python/0053/main.py | 3 | 1869 | import string
import sys
import matplotlib.pyplot as pyplot
def process_file( filename ):
"""Reads a text from a file and counts word frequencies.
Return a dictionary mapping each word to its frequency.
"""
hist = {}
with open( filename, 'r' ) as file_handle:
# Skip the Project Gutenberg header on the same handle,
# so that header words are not counted.
skip_head( file_handle )
for line in file_handle:
words = line.replace( '-', ' ' ).lower().strip()
for word in words.split():
word = word.strip( string.whitespace + string.punctuation )
hist[word] = hist.get( word, 0 ) + 1
return hist
def skip_head( file_handle ):
"""Advances an open file handle past the Project Gutenberg header,
i.e. past the line starting with '*END*THE SMALL PRINT!'.
"""
for line in file_handle:
if line.startswith( '*END*THE SMALL PRINT!' ):
break
def rank_freq( hist ):
"""Reurrn a list of tuples where each tuple is a rank
and the number of times the item with that rank appeared.
"""
freqs = hist.values()
freqs.sort( reverse = True )
rf = [( r, f ) for r, f in enumerate( freqs, start = 1 )]
return rf
def print_ranks( rank_freq ):
"""Prints the rank vs. frequency data."""
for r, f in rank_freq:
print r, f
def plot_ranks( rank_freq, scale = 'log' ):
"""Plots frequency vs. rank."""
rs, fs = zip( *rank_freq )
pyplot.clf()
pyplot.xscale( scale )
pyplot.yscale( scale )
pyplot.title( 'Zipf plot' )
pyplot.xlabel( 'rank' )
pyplot.ylabel( 'frequency' )
pyplot.plot( rs, fs, 'r-' )
pyplot.show()
def main( name, filename = 'emma.txt', flag = 'plot', *args ):
hist = process_file( filename )
rf = rank_freq( hist )
if flag == 'print':
print_ranks( rf )
elif flag == 'plot':
plot_ranks( rf )
else:
print 'Usage: python main.py filename [print|plot]'
main( *sys.argv )
| gpl-2.0 |
thilbern/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
hanteng/pyCountrySize | pyCountrySize/scipy.stats.linregress_PPPGDP_LP.py | 1 | 1296 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import pyCountrySize
from scipy import stats
(x_picked, y_picked)=("LP", "PPPGDP")
df=pyCountrySize.sizec
df=df.copy()
varx= df[x_picked]
vary= df[y_picked]
#Dealing with missing values
#http://stackoverflow.com/questions/13643363/linear-regression-of-arrays-containing-nans-in-python-numpy
mask = ~np.isnan(varx) & ~np.isnan(vary)
#Running scipy.stats.linregress
#http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.linregress.html
slope_, intercept_, r_value, p_value, std_err = stats.linregress(varx[mask],vary[mask])
print "r-squared:", r_value**2
print "slope_, intercept_:",slope_, intercept_
import statsmodels.api as sm
est = sm.OLS(vary[mask], varx[mask])
est = est.fit()
print est.summary()
import matplotlib.pyplot as plt
X_prime = np.linspace(varx.min(), varx.max(), 100)[:, np.newaxis]
y_hat = est.predict(X_prime)
plt.scatter(varx, vary, alpha=0.3)
plt.xlabel(x_picked)
plt.ylabel(y_picked)
plt.plot(X_prime, y_hat, 'r', alpha=0.9) # Add the regression line, colored in red
#plt.show()
# import formula api as alias smf
import statsmodels.formula.api as smf
# formula: response ~ predictors
est = smf.ols(formula='%s ~ %s'%(y_picked, x_picked), data=df).fit()
print est.summary()
| gpl-3.0 |
e6-1/dl6-1 | helpers.py | 1 | 2681 | import numpy as np
import pandas as pd
def get_sub_seq(seq, start, end):
"""Get the sub sequence starting at the start index and ending at the end index."""
arr = seq[max([0, start]):end]
if start < 0:
arr = np.append(np.zeros((abs(start),2)), arr, axis=0)
for i in range(len(arr)):
if np.sum(arr[i]) == 0:
arr[i] = [1, 0]
return arr
def get_train_test_split(train_size, test_size):
inds = range(train_size + test_size)
test_inds = minibatch(inds, test_size, train_size + test_size)[0]
training_inds = [i for i in inds if i not in test_inds]
return training_inds, test_inds
def minibatch(data, batch_size, data_size):
"""Generates a minibatch from the given data and parameters."""
randomized = np.random.permutation(data)
batches = []
num_batches = 0
while num_batches * batch_size < data_size:
new_batch = randomized[num_batches * batch_size:(num_batches + 1) * batch_size]
batches.append(new_batch)
num_batches += 1
return batches
def get_glimpses(images, coords):
"""Gets a batch of glimpses."""
arr = []
for img, coord in zip(images, coords):
arr.append(get_glimpse(img, coord[0], coord[1]))
return np.array(arr)
def get_glimpse(image, x, y, stride=14):
"""Returns a subsection (glimpse) of the image centered on the given point."""
x = int(x) # Force to int
y = int(y) # Force to int
min_x = x - stride
max_x = x + stride
min_y = y - stride
max_y = y + stride
image_glimpse = image[min_y:max_y, min_x:max_x, :] # NOTE: row, column, RGB
# image_glimpse = image[min_y:max_y, min_x:max_x, 0] # NOTE: row, column, RGB; everything is greyscale; flatten RGB layer
return imgToArr(image_glimpse)
def get_data():
"""Returns a dictionary of data with keys for "inputs" and "outputs"."""
input_glimpses = np.zeros((80000, 28, 28, 3))
input_gazes = np.zeros((80000, 2))
outputs = np.zeros((80000, 2))
for batch in range(1, 9):
file_name = "data/glimpse_batchc_{0}.npz".format(batch)
array = np.load(file_name)
input_glimpses[(batch - 1) * 10000: batch * 10000] = array['frames']
input_gazes[(batch - 1) * 10000: batch * 10000] = array['gazes']
outputs[(batch - 1) * 10000: batch * 10000] = array['braking']
for i in range(len(outputs)):
if np.sum(outputs[i]) == 0:
outputs[i] = [1, 0]
sequences = np.array([get_sub_seq(outputs, i-3, i) for i in range(len(outputs))])
sequences = sequences.reshape(-1, 3*2)
data = {
"input_glimpses": input_glimpses,
"input_gazes": input_gazes,
"input_sequences": sequences,
"outputs": outputs
}
return data
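# Example usage (a sketch; it assumes the data/glimpse_batchc_*.npz files
# referenced above are present, and the batch size of 64 is arbitrary):
#
#     data = get_data()
#     train_inds, test_inds = get_train_test_split(70000, 10000)
#     for batch in minibatch(train_inds, 64, len(train_inds)):
#         glimpse_batch = data["input_glimpses"][batch]
#         label_batch = data["outputs"][batch]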
| mit |
olgabot/poshsplice | poshsplice/hmmscan.py | 1 | 1282 | import pandas as pd
def read_hmmscan(hmmscan_out):
"""Read output from hmmscan
Parameters
----------
hmmscan_out : str
Filename of hmmscan output
Returns
-------
hmmscan_df : pandas.DataFrame
Parsed string of the hmmscan output
"""
entries = []
with open(hmmscan_out) as f:
for line in f.readlines():
if line.startswith('#'):
continue
split = line.split()
beginning = split[:22]
end = ' '.join(split[22:])
entries.append(beginning + [end])
columns = ['target_name', 'target_accession', 'target_length',
'query_name', 'query_accession', 'query_length',
'sequence_e_value', 'sequence_score', 'sequence_bias',
'domain_number', 'domain_total', 'domain_conditional_e_value',
'domain_independent_e_value', 'domain_score', 'domain_bias',
'target_start', 'target_stop', 'query_start', 'query_stop',
'query_domain_envelope_start', 'query_domain_envelope_stop',
'mean_posterior_probability', 'target_description']
df = pd.DataFrame.from_records(entries, columns=columns)
df = df.convert_objects(convert_numeric=True)
return df
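# Example usage (a sketch): the parser above reads the whitespace-delimited
# per-domain table as produced by ``hmmscan --domtblout``; the file names below
# are hypothetical and the e-value cutoff is arbitrary.
#
#     hits = read_hmmscan('hmmscan_domtblout.txt')
#     strong = hits[hits.sequence_e_value < 1e-5]
#     strong.to_csv('filtered_hits.csv', index=False)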
| bsd-3-clause |
Sticklyman1936/workload-automation | wlauto/instrumentation/energy_model/__init__.py | 2 | 42148 | # Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#pylint: disable=attribute-defined-outside-init,access-member-before-definition,redefined-outer-name
from __future__ import division
import os
import math
import time
from tempfile import mktemp
from base64 import b64encode
from collections import Counter, namedtuple
try:
import jinja2
import pandas as pd
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
import numpy as np
low_filter = np.vectorize(lambda x: x > 0 and x or 0) # pylint: disable=no-member
import_error = None
except ImportError as e:
import_error = e
jinja2 = None
pd = None
plt = None
np = None
low_filter = None
from wlauto import Instrument, Parameter, File
from wlauto.exceptions import ConfigError, InstrumentError, DeviceError
from wlauto.instrumentation import instrument_is_installed
from wlauto.utils.types import caseless_string, list_or_caseless_string, list_of_ints
from wlauto.utils.misc import list_to_mask
FREQ_TABLE_FILE = 'frequency_power_perf_data.csv'
CPUS_TABLE_FILE = 'projected_cap_power.csv'
MEASURED_CPUS_TABLE_FILE = 'measured_cap_power.csv'
IDLE_TABLE_FILE = 'idle_power_perf_data.csv'
REPORT_TEMPLATE_FILE = 'report.template'
EM_TEMPLATE_FILE = 'em.template'
IdlePowerState = namedtuple('IdlePowerState', ['power'])
CapPowerState = namedtuple('CapPowerState', ['cap', 'power'])
class EnergyModel(object):
def __init__(self):
self.big_cluster_idle_states = []
self.little_cluster_idle_states = []
self.big_cluster_cap_states = []
self.little_cluster_cap_states = []
self.big_core_idle_states = []
self.little_core_idle_states = []
self.big_core_cap_states = []
self.little_core_cap_states = []
def add_cap_entry(self, cluster, perf, clust_pow, core_pow):
if cluster == 'big':
self.big_cluster_cap_states.append(CapPowerState(perf, clust_pow))
self.big_core_cap_states.append(CapPowerState(perf, core_pow))
elif cluster == 'little':
self.little_cluster_cap_states.append(CapPowerState(perf, clust_pow))
self.little_core_cap_states.append(CapPowerState(perf, core_pow))
else:
raise ValueError('Unexpected cluster: {}'.format(cluster))
def add_cluster_idle(self, cluster, values):
for value in values:
if cluster == 'big':
self.big_cluster_idle_states.append(IdlePowerState(value))
elif cluster == 'little':
self.little_cluster_idle_states.append(IdlePowerState(value))
else:
raise ValueError('Unexpected cluster: {}'.format(cluster))
def add_core_idle(self, cluster, values):
for value in values:
if cluster == 'big':
self.big_core_idle_states.append(IdlePowerState(value))
elif cluster == 'little':
self.little_core_idle_states.append(IdlePowerState(value))
else:
raise ValueError('Unexpected cluster: {}'.format(cluster))
class PowerPerformanceAnalysis(object):
def __init__(self, data):
self.summary = {}
big_freqs = data[data.cluster == 'big'].frequency.unique()
little_freqs = data[data.cluster == 'little'].frequency.unique()
self.summary['frequency'] = max(set(big_freqs).intersection(set(little_freqs)))
big_sc = data[(data.cluster == 'big') &
(data.frequency == self.summary['frequency']) &
(data.cpus == 1)]
little_sc = data[(data.cluster == 'little') &
(data.frequency == self.summary['frequency']) &
(data.cpus == 1)]
self.summary['performance_ratio'] = big_sc.performance.item() / little_sc.performance.item()
self.summary['power_ratio'] = big_sc.power.item() / little_sc.power.item()
self.summary['max_performance'] = data[data.cpus == 1].performance.max()
self.summary['max_power'] = data[data.cpus == 1].power.max()
def build_energy_model(freq_power_table, cpus_power, idle_power, first_cluster_idle_state):
# pylint: disable=too-many-locals
em = EnergyModel()
idle_power_sc = idle_power[idle_power.cpus == 1]
perf_data = get_normalized_single_core_data(freq_power_table)
for cluster in ['little', 'big']:
cluster_cpus_power = cpus_power[cluster].dropna()
cluster_power = cluster_cpus_power['cluster'].apply(int)
core_power = (cluster_cpus_power['1'] - cluster_power).apply(int)
performance = (perf_data[perf_data.cluster == cluster].performance_norm * 1024 / 100).apply(int)
for perf, clust_pow, core_pow in zip(performance, cluster_power, core_power):
em.add_cap_entry(cluster, perf, clust_pow, core_pow)
all_idle_power = idle_power_sc[idle_power_sc.cluster == cluster].power.values
# CORE idle states
# We want the delta of each state w.r.t. the power
# consumption of the shallowest one at this level (core_ref)
idle_core_power = low_filter(all_idle_power[:first_cluster_idle_state] -
all_idle_power[first_cluster_idle_state - 1])
# CLUSTER idle states
# We want the absolute value of each idle state
idle_cluster_power = low_filter(all_idle_power[first_cluster_idle_state - 1:])
em.add_cluster_idle(cluster, idle_cluster_power)
em.add_core_idle(cluster, idle_core_power)
return em
def generate_em_c_file(em, big_core, little_core, em_template_file, outfile):
with open(em_template_file) as fh:
em_template = jinja2.Template(fh.read())
em_text = em_template.render(
big_core=big_core,
little_core=little_core,
em=em,
)
with open(outfile, 'w') as wfh:
wfh.write(em_text)
return em_text
def generate_report(freq_power_table, measured_cpus_table, cpus_table, idle_power_table, # pylint: disable=unused-argument
report_template_file, device_name, em_text, outfile):
# pylint: disable=too-many-locals
cap_power_analysis = PowerPerformanceAnalysis(freq_power_table)
single_core_norm = get_normalized_single_core_data(freq_power_table)
cap_power_plot = get_cap_power_plot(single_core_norm)
idle_power_plot = get_idle_power_plot(idle_power_table)
fig, axes = plt.subplots(1, 2)
fig.set_size_inches(16, 8)
for i, cluster in enumerate(reversed(cpus_table.columns.levels[0])):
projected = cpus_table[cluster].dropna(subset=['1'])
plot_cpus_table(projected, axes[i], cluster)
cpus_plot_data = get_figure_data(fig)
with open(report_template_file) as fh:
report_template = jinja2.Template(fh.read())
html = report_template.render(
device_name=device_name,
freq_power_table=freq_power_table.set_index(['cluster', 'cpus', 'frequency']).to_html(),
cap_power_analysis=cap_power_analysis,
cap_power_plot=get_figure_data(cap_power_plot),
idle_power_table=idle_power_table.set_index(['cluster', 'cpus', 'state']).to_html(),
idle_power_plot=get_figure_data(idle_power_plot),
cpus_table=cpus_table.to_html(),
cpus_plot=cpus_plot_data,
em_text=em_text,
)
with open(outfile, 'w') as wfh:
wfh.write(html)
return html
def wa_result_to_power_perf_table(df, performance_metric, index):
table = df.pivot_table(index=index + ['iteration'],
columns='metric', values='value').reset_index()
result_mean = table.groupby(index).mean()
result_std = table.groupby(index).std()
result_std.columns = [c + ' std' for c in result_std.columns]
result_count = table.groupby(index).count()
result_count.columns = [c + ' count' for c in result_count.columns]
count_sqrt = result_count.apply(lambda x: x.apply(math.sqrt))
count_sqrt.columns = result_std.columns # match column names for division
result_error = 1.96 * result_std / count_sqrt # 1.96 == 95% confidence interval
result_error.columns = [c + ' error' for c in result_mean.columns]
result = pd.concat([result_mean, result_std, result_count, result_error], axis=1)
del result['iteration']
del result['iteration std']
del result['iteration count']
del result['iteration error']
updated_columns = []
for column in result.columns:
if column == performance_metric:
updated_columns.append('performance')
elif column == performance_metric + ' std':
updated_columns.append('performance_std')
elif column == performance_metric + ' error':
updated_columns.append('performance_error')
else:
updated_columns.append(column.replace(' ', '_'))
result.columns = updated_columns
result = result[sorted(result.columns)]
result.reset_index(inplace=True)
return result
def get_figure_data(fig, fmt='png'):
tmp = mktemp()
fig.savefig(tmp, format=fmt, bbox_inches='tight')
with open(tmp, 'rb') as fh:
image_data = b64encode(fh.read())
os.remove(tmp)
return image_data
def get_normalized_single_core_data(data):
finite_power = np.isfinite(data.power) # pylint: disable=no-member
finite_perf = np.isfinite(data.performance) # pylint: disable=no-member
data_single_core = data[(data.cpus == 1) & finite_perf & finite_power].copy()
data_single_core['performance_norm'] = (data_single_core.performance /
data_single_core.performance.max() * 100).apply(int)
data_single_core['power_norm'] = (data_single_core.power /
data_single_core.power.max() * 100).apply(int)
return data_single_core
def get_cap_power_plot(data_single_core):
big_single_core = data_single_core[(data_single_core.cluster == 'big') &
(data_single_core.cpus == 1)]
little_single_core = data_single_core[(data_single_core.cluster == 'little') &
(data_single_core.cpus == 1)]
fig, axes = plt.subplots(1, 1, figsize=(12, 8))
axes.plot(big_single_core.performance_norm,
big_single_core.power_norm,
marker='o')
axes.plot(little_single_core.performance_norm,
little_single_core.power_norm,
marker='o')
axes.set_xlim(0, 105)
axes.set_ylim(0, 105)
axes.set_xlabel('Performance (Normalized)')
axes.set_ylabel('Power (Normalized)')
axes.grid()
axes.legend(['big cluster', 'little cluster'], loc=0)
return fig
def get_idle_power_plot(df):
fig, axes = plt.subplots(1, 2, figsize=(15, 7))
for cluster, ax in zip(['little', 'big'], axes):
data = df[df.cluster == cluster].pivot_table(index=['state'], columns='cpus', values='power')
err = df[df.cluster == cluster].pivot_table(index=['state'], columns='cpus', values='power_error')
data.plot(kind='bar', ax=ax, rot=30, yerr=err)
ax.set_title('{} cluster'.format(cluster))
ax.set_xlim(-1, len(data.columns) - 0.5)
ax.set_ylabel('Power (mW)')
return fig
def fit_polynomial(s, n):
# pylint: disable=no-member
coeffs = np.polyfit(s.index, s.values, n)
poly = np.poly1d(coeffs)
return poly(s.index)
def get_cpus_power_table(data, index, opps, leak_factors): # pylint: disable=too-many-locals
# pylint: disable=no-member
power_table = data[[index, 'cluster', 'cpus', 'power']].pivot_table(index=index,
columns=['cluster', 'cpus'],
values='power')
bs_power_table = pd.DataFrame(index=power_table.index, columns=power_table.columns)
for cluster in power_table.columns.levels[0]:
power_table[cluster, 0] = (power_table[cluster, 1] -
(power_table[cluster, 2] -
power_table[cluster, 1]))
bs_power_table.loc[power_table[cluster, 1].notnull(), (cluster, 1)] = fit_polynomial(power_table[cluster, 1].dropna(), 2)
bs_power_table.loc[power_table[cluster, 2].notnull(), (cluster, 2)] = fit_polynomial(power_table[cluster, 2].dropna(), 2)
if opps[cluster] is None:
bs_power_table.loc[bs_power_table[cluster, 1].notnull(), (cluster, 0)] = \
(2 * power_table[cluster, 1] - power_table[cluster, 2]).values
else:
voltages = opps[cluster].set_index('frequency').sort_index()
leakage = leak_factors[cluster] * 2 * voltages['voltage']**3 / 0.9**3
leakage_delta = leakage - leakage[leakage.index[0]]
bs_power_table.loc[:, (cluster, 0)] = \
(2 * bs_power_table[cluster, 1] + leakage_delta - bs_power_table[cluster, 2])
# re-order columns and rename colum '0' to 'cluster'
power_table = power_table[sorted(power_table.columns,
cmp=lambda x, y: cmp(y[0], x[0]) or cmp(x[1], y[1]))]
bs_power_table = bs_power_table[sorted(bs_power_table.columns,
cmp=lambda x, y: cmp(y[0], x[0]) or cmp(x[1], y[1]))]
old_levels = power_table.columns.levels
power_table.columns.set_levels([old_levels[0], list(map(str, old_levels[1])[:-1]) + ['cluster']],
inplace=True)
bs_power_table.columns.set_levels([old_levels[0], list(map(str, old_levels[1])[:-1]) + ['cluster']],
inplace=True)
return power_table, bs_power_table
def plot_cpus_table(projected, ax, cluster):
projected.T.plot(ax=ax, marker='o')
ax.set_title('{} cluster'.format(cluster))
ax.set_xticklabels(projected.columns)
ax.set_xticks(range(0, 5))
ax.set_xlim(-0.5, len(projected.columns) - 0.5)
ax.set_ylabel('Power (mW)')
ax.grid(True)
def opp_table(d):
if d is None:
return None
return pd.DataFrame(d.items(), columns=['frequency', 'voltage'])
class EnergyModelInstrument(Instrument):
name = 'energy_model'
desicription = """
Generates a power mode for the device based on specified workload.
This instrument will execute the workload specified by the agenda (currently, only ``sysbench`` is
supported) and will use the resulting performance and power measurments to generate a power mode for
the device.
This instrument requires certain features to be present in the kernel:
1. cgroups and cpusets must be enabled.
2. cpufreq and userspace governor must be enabled.
3. cpuidle must be enabled.
"""
parameters = [
Parameter('device_name', kind=caseless_string,
description="""The name of the device to be used in generating the model. If not specified,
``device.name`` will be used. """),
Parameter('big_core', kind=caseless_string,
description="""The name of the "big" core in the big.LITTLE system; must match
one of the values in ``device.core_names``. """),
Parameter('performance_metric', kind=caseless_string, mandatory=True,
description="""Metric to be used as the performance indicator."""),
Parameter('power_metric', kind=list_or_caseless_string,
description="""Metric to be used as the power indicator. The value may contain a
``{core}`` format specifier that will be replaced with names of big
and little cores to drive the name of the metric for that cluster.
Either this or ``energy_metric`` must be specified but not both."""),
Parameter('energy_metric', kind=list_or_caseless_string,
description="""Metric to be used as the energy indicator. The value may contain a
``{core}`` format specifier that will be replaced with names of big
and little cores to drive the name of the metric for that cluster.
This metric will be used to derive power by dividing through by
execution time. Either this or ``power_metric`` must be specified, but
not both."""),
Parameter('power_scaling_factor', kind=float, default=1.0,
description="""Power model specfies power in milliWatts. This is a scaling factor that
power_metric values will be multiplied by to get milliWatts."""),
Parameter('big_frequencies', kind=list_of_ints,
description="""List of frequencies to be used for big cores. These frequencies must
be supported by the cores. If this is not specified, all available
frequencies for the core (as read from cpufreq) will be used."""),
Parameter('little_frequencies', kind=list_of_ints,
description="""List of frequencies to be used for little cores. These frequencies must
be supported by the cores. If this is not specified, all available
frequencies for the core (as read from cpufreq) will be used."""),
Parameter('idle_workload', kind=str, default='idle',
description="Workload to be used while measuring idle power."),
Parameter('idle_workload_params', kind=dict, default={},
description="Parameter to pass to the idle workload."),
Parameter('first_cluster_idle_state', kind=int, default=-1,
description='''The index of the first cluster idle state on the device. Previous states
are assumed to be core idle states. The default is ``-1``, i.e. only the last
idle state is assumed to affect the entire cluster.'''),
Parameter('no_hotplug', kind=bool, default=False,
description='''This option allows running the instrument without hotplugging cores on and off.
Disabling hotplugging will most likely produce a less accurate power model.'''),
Parameter('num_of_freqs_to_thermal_adjust', kind=int, default=0,
description="""The number of frequencies begining from the highest, to be adjusted for
the thermal effect."""),
Parameter('big_opps', kind=opp_table,
description="""OPP table mapping frequency to voltage (kHz --> mV) for the big cluster."""),
Parameter('little_opps', kind=opp_table,
description="""OPP table mapping frequency to voltage (kHz --> mV) for the little cluster."""),
Parameter('big_leakage', kind=int, default=120,
description="""
Leakage factor for the big cluster (this is specific to a particular core implementation).
"""),
Parameter('little_leakage', kind=int, default=60,
description="""
Leakage factor for the little cluster (this is specific to a particular core implementation).
"""),
]
def validate(self):
if import_error:
message = 'energy_model instrument requires pandas, jinja2 and matplotlib Python packages to be installed; got: "{}"'
raise InstrumentError(message.format(import_error.message))
for capability in ['cgroups', 'cpuidle']:
if not self.device.has(capability):
message = 'The Device does not appear to support {}; does it have the right module installed?'
raise ConfigError(message.format(capability))
device_cores = set(self.device.core_names)
if (self.power_metric and self.energy_metric) or not (self.power_metric or self.energy_metric):
raise ConfigError('Either power_metric or energy_metric must be specified (but not both).')
if not device_cores:
raise ConfigError('The Device does not appear to have core_names configured.')
elif len(device_cores) != 2:
raise ConfigError('The Device does not appear to be a big.LITTLE device.')
if self.big_core and self.big_core not in self.device.core_names:
raise ConfigError('Specified big_core "{}" is not one of the cores of device {}'.format(self.big_core, self.device.name))
if not self.big_core:
self.big_core = self.device.core_names[-1] # the last core is usually "big" in existing big.LITTLE devices
if not self.device_name:
self.device_name = self.device.name
if self.num_of_freqs_to_thermal_adjust and not instrument_is_installed('daq'):
self.logger.warn('Adjustment for thermal effect requires daq instrument. Disabling adjustment')
self.num_of_freqs_to_thermal_adjust = 0
def initialize(self, context):
self.number_of_cpus = {}
self.report_template_file = context.resolver.get(File(self, REPORT_TEMPLATE_FILE))
self.em_template_file = context.resolver.get(File(self, EM_TEMPLATE_FILE))
self.little_core = (set(self.device.core_names) - set([self.big_core])).pop()
self.perform_runtime_validation()
self.enable_all_cores()
self.configure_clusters()
self.discover_idle_states()
self.disable_thermal_management()
self.initialize_job_queue(context)
self.initialize_result_tracking()
def setup(self, context):
if not context.spec.label.startswith('idle_'):
return
for idle_state in self.get_device_idle_states(self.measured_cluster):
if idle_state.index > context.spec.idle_state_index:
idle_state.disable = 1
else:
idle_state.disable = 0
def fast_start(self, context): # pylint: disable=unused-argument
self.start_time = time.time()
def fast_stop(self, context): # pylint: disable=unused-argument
self.run_time = time.time() - self.start_time
def on_iteration_start(self, context):
self.setup_measurement(context.spec.cluster)
def thermal_correction(self, context):
if not self.num_of_freqs_to_thermal_adjust or self.num_of_freqs_to_thermal_adjust > len(self.big_frequencies):
return 0
freqs = self.big_frequencies[-self.num_of_freqs_to_thermal_adjust:]
spec = context.result.spec
if spec.frequency not in freqs:
return 0
data_path = os.path.join(context.output_directory, 'daq', '{}.csv'.format(self.big_core))
data = pd.read_csv(data_path)['power']
return _adjust_for_thermal(data, filt_method=lambda x: pd.rolling_median(x, 1000), thresh=0.9, window=5000)
# slow to make sure power results have been generated
def slow_update_result(self, context): # pylint: disable=too-many-branches
spec = context.result.spec
cluster = spec.cluster
is_freq_iteration = spec.label.startswith('freq_')
perf_metric = 0
power_metric = 0
thermal_adjusted_power = 0
if is_freq_iteration and cluster == 'big':
thermal_adjusted_power = self.thermal_correction(context)
for metric in context.result.metrics:
if metric.name == self.performance_metric:
perf_metric = metric.value
elif thermal_adjusted_power and metric.name in self.big_power_metrics:
power_metric += thermal_adjusted_power * self.power_scaling_factor
elif (cluster == 'big') and metric.name in self.big_power_metrics:
power_metric += metric.value * self.power_scaling_factor
elif (cluster == 'little') and metric.name in self.little_power_metrics:
power_metric += metric.value * self.power_scaling_factor
elif thermal_adjusted_power and metric.name in self.big_energy_metrics:
power_metric += thermal_adjusted_power / self.run_time * self.power_scaling_factor
elif (cluster == 'big') and metric.name in self.big_energy_metrics:
power_metric += metric.value / self.run_time * self.power_scaling_factor
elif (cluster == 'little') and metric.name in self.little_energy_metrics:
power_metric += metric.value / self.run_time * self.power_scaling_factor
if not (power_metric and (perf_metric or not is_freq_iteration)):
message = 'Incomplete results for {} iteration{}'
raise InstrumentError(message.format(context.result.spec.id, context.current_iteration))
if is_freq_iteration:
index_matter = [cluster, spec.num_cpus,
spec.frequency, context.result.iteration]
data = self.freq_data
else:
index_matter = [cluster, spec.num_cpus,
spec.idle_state_id, spec.idle_state_desc, context.result.iteration]
data = self.idle_data
if self.no_hotplug:
# due to the fact that hotplugging was disabled, power has to be artificially scaled
# to the number of cores that should have been active if hotplugging had occurred.
power_metric = spec.num_cpus * (power_metric / self.number_of_cpus[cluster])
data.append(index_matter + ['performance', perf_metric])
data.append(index_matter + ['power', power_metric])
def before_overall_results_processing(self, context):
# pylint: disable=too-many-locals
if not self.idle_data or not self.freq_data:
self.logger.warning('Run aborted early; not generating energy_model.')
return
output_directory = os.path.join(context.output_directory, 'energy_model')
os.makedirs(output_directory)
df = pd.DataFrame(self.idle_data, columns=['cluster', 'cpus', 'state_id',
'state', 'iteration', 'metric', 'value'])
idle_power_table = wa_result_to_power_perf_table(df, '', index=['cluster', 'cpus', 'state'])
idle_output = os.path.join(output_directory, IDLE_TABLE_FILE)
with open(idle_output, 'w') as wfh:
idle_power_table.to_csv(wfh, index=False)
context.add_artifact('idle_power_table', idle_output, 'export')
df = pd.DataFrame(self.freq_data,
columns=['cluster', 'cpus', 'frequency', 'iteration', 'metric', 'value'])
freq_power_table = wa_result_to_power_perf_table(df, self.performance_metric,
index=['cluster', 'cpus', 'frequency'])
freq_output = os.path.join(output_directory, FREQ_TABLE_FILE)
with open(freq_output, 'w') as wfh:
freq_power_table.to_csv(wfh, index=False)
context.add_artifact('freq_power_table', freq_output, 'export')
if self.big_opps is None or self.little_opps is None:
message = 'OPPs not specified for one or both clusters; cluster power will not be adjusted for leakage.'
self.logger.warning(message)
opps = {'big': self.big_opps, 'little': self.little_opps}
leakages = {'big': self.big_leakage, 'little': self.little_leakage}
try:
measured_cpus_table, cpus_table = get_cpus_power_table(freq_power_table, 'frequency', opps, leakages)
except (ValueError, KeyError, IndexError) as e:
self.logger.error('Could not create cpu power tables: {}'.format(e))
return
measured_cpus_output = os.path.join(output_directory, MEASURED_CPUS_TABLE_FILE)
with open(measured_cpus_output, 'w') as wfh:
measured_cpus_table.to_csv(wfh)
context.add_artifact('measured_cpus_table', measured_cpus_output, 'export')
cpus_output = os.path.join(output_directory, CPUS_TABLE_FILE)
with open(cpus_output, 'w') as wfh:
cpus_table.to_csv(wfh)
context.add_artifact('cpus_table', cpus_output, 'export')
em = build_energy_model(freq_power_table, cpus_table, idle_power_table, self.first_cluster_idle_state)
em_file = os.path.join(output_directory, '{}_em.c'.format(self.device_name))
em_text = generate_em_c_file(em, self.big_core, self.little_core,
self.em_template_file, em_file)
context.add_artifact('em', em_file, 'data')
report_file = os.path.join(output_directory, 'report.html')
generate_report(freq_power_table, measured_cpus_table, cpus_table,
idle_power_table, self.report_template_file,
self.device_name, em_text, report_file)
context.add_artifact('pm_report', report_file, 'export')
def initialize_result_tracking(self):
self.freq_data = []
self.idle_data = []
self.big_power_metrics = []
self.little_power_metrics = []
self.big_energy_metrics = []
self.little_energy_metrics = []
if self.power_metric:
self.big_power_metrics = [pm.format(core=self.big_core) for pm in self.power_metric]
self.little_power_metrics = [pm.format(core=self.little_core) for pm in self.power_metric]
else: # must be energy_metric
self.big_energy_metrics = [em.format(core=self.big_core) for em in self.energy_metric]
self.little_energy_metrics = [em.format(core=self.little_core) for em in self.energy_metric]
def configure_clusters(self):
self.measured_cores = None
self.measuring_cores = None
self.cpuset = self.device.get_cgroup_controller('cpuset')
self.cpuset.create_group('big', self.big_cpus, [0])
self.cpuset.create_group('little', self.little_cpus, [0])
for cluster in set(self.device.core_clusters):
self.device.set_cluster_governor(cluster, 'userspace')
def discover_idle_states(self):
online_cpu = self.device.get_online_cpus(self.big_core)[0]
self.big_idle_states = self.device.get_cpuidle_states(online_cpu)
online_cpu = self.device.get_online_cpus(self.little_core)[0]
self.little_idle_states = self.device.get_cpuidle_states(online_cpu)
if not (len(self.big_idle_states) >= 2 and len(self.little_idle_states) >= 2):
raise DeviceError('There do not appear to be at least two idle states '
'on at least one of the clusters.')
def setup_measurement(self, measured):
measuring = 'big' if measured == 'little' else 'little'
self.measured_cluster = measured
self.measuring_cluster = measuring
self.measured_cpus = self.big_cpus if measured == 'big' else self.little_cpus
self.measuring_cpus = self.little_cpus if measured == 'big' else self.big_cpus
self.reset()
def reset(self):
self.enable_all_cores()
self.enable_all_idle_states()
self.reset_cgroups()
self.cpuset.move_all_tasks_to(self.measuring_cluster)
server_process = 'adbd' if self.device.platform == 'android' else 'sshd'
server_pids = self.device.get_pids_of(server_process)
children_ps = [e for e in self.device.ps()
if e.ppid in server_pids and e.name != 'sshd']
children_pids = [e.pid for e in children_ps]
pids_to_move = server_pids + children_pids
self.cpuset.root.add_tasks(pids_to_move)
for pid in pids_to_move:
try:
self.device.execute('busybox taskset -p 0x{:x} {}'.format(list_to_mask(self.measuring_cpus), pid))
except DeviceError:
pass
def enable_all_cores(self):
counter = Counter(self.device.core_names)
for core, number in counter.iteritems():
self.device.set_number_of_online_cpus(core, number)
self.big_cpus = self.device.get_online_cpus(self.big_core)
self.little_cpus = self.device.get_online_cpus(self.little_core)
def enable_all_idle_states(self):
for cpu in self.device.online_cpus:
for state in self.device.get_cpuidle_states(cpu):
state.disable = 0
def reset_cgroups(self):
self.big_cpus = self.device.get_online_cpus(self.big_core)
self.little_cpus = self.device.get_online_cpus(self.little_core)
self.cpuset.big.set(self.big_cpus, 0)
self.cpuset.little.set(self.little_cpus, 0)
def perform_runtime_validation(self):
if not self.device.is_rooted:
raise InstrumentError('the device must be rooted to generate energy models')
if 'userspace' not in self.device.list_available_cluster_governors(0):
raise InstrumentError('userspace cpufreq governor must be enabled')
error_message = 'Frequency {} is not supported by {} cores'
available_frequencies = self.device.list_available_core_frequencies(self.big_core)
if self.big_frequencies:
for freq in self.big_frequencies:
if freq not in available_frequencies:
raise ConfigError(error_message.format(freq, self.big_core))
else:
self.big_frequencies = available_frequencies
available_frequencies = self.device.list_available_core_frequencies(self.little_core)
if self.little_frequencies:
for freq in self.little_frequencies:
if freq not in available_frequencies:
raise ConfigError(error_message.format(freq, self.little_core))
else:
self.little_frequencies = available_frequencies
def initialize_job_queue(self, context):
old_specs = []
for job in context.runner.job_queue:
if job.spec not in old_specs:
old_specs.append(job.spec)
new_specs = self.get_cluster_specs(old_specs, 'big', context)
new_specs.extend(self.get_cluster_specs(old_specs, 'little', context))
        # Update config to reflect jobs that will actually run.
context.config.workload_specs = new_specs
config_file = os.path.join(context.host_working_directory, 'run_config.json')
with open(config_file, 'wb') as wfh:
context.config.serialize(wfh)
context.runner.init_queue(new_specs)
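    # get_cluster_specs() below expands the original specs into one idle job per
    # (idle state, number of online CPUs) and one busy job per
    # (frequency, number of online CPUs) for the given cluster.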
def get_cluster_specs(self, old_specs, cluster, context):
core = self.get_core_name(cluster)
self.number_of_cpus[cluster] = sum([1 for c in self.device.core_names if c == core])
cluster_frequencies = self.get_frequencies_param(cluster)
if not cluster_frequencies:
raise InstrumentError('Could not read available frequencies for {}'.format(core))
min_frequency = min(cluster_frequencies)
idle_states = self.get_device_idle_states(cluster)
new_specs = []
for state in idle_states:
for num_cpus in xrange(1, self.number_of_cpus[cluster] + 1):
spec = old_specs[0].copy()
spec.workload_name = self.idle_workload
spec.workload_parameters = self.idle_workload_params
spec.idle_state_id = state.id
spec.idle_state_desc = state.desc
spec.idle_state_index = state.index
if not self.no_hotplug:
spec.runtime_parameters['{}_cores'.format(core)] = num_cpus
spec.runtime_parameters['{}_frequency'.format(core)] = min_frequency
if self.device.platform == 'chromeos':
spec.runtime_parameters['ui'] = 'off'
spec.cluster = cluster
spec.num_cpus = num_cpus
spec.id = '{}_idle_{}_{}'.format(cluster, state.id, num_cpus)
spec.label = 'idle_{}'.format(cluster)
spec.number_of_iterations = old_specs[0].number_of_iterations
spec.load(self.device, context.config.ext_loader)
spec.workload.init_resources(context)
spec.workload.validate()
new_specs.append(spec)
for old_spec in old_specs:
if old_spec.workload_name not in ['sysbench', 'dhrystone']:
raise ConfigError('Only sysbench and dhrystone workloads currently supported for energy_model generation.')
for freq in cluster_frequencies:
for num_cpus in xrange(1, self.number_of_cpus[cluster] + 1):
spec = old_spec.copy()
spec.runtime_parameters['{}_frequency'.format(core)] = freq
if not self.no_hotplug:
spec.runtime_parameters['{}_cores'.format(core)] = num_cpus
if self.device.platform == 'chromeos':
spec.runtime_parameters['ui'] = 'off'
spec.id = '{}_{}_{}'.format(cluster, num_cpus, freq)
spec.label = 'freq_{}_{}'.format(cluster, spec.label)
spec.workload_parameters['taskset_mask'] = list_to_mask(self.get_cpus(cluster))
spec.workload_parameters['threads'] = num_cpus
if old_spec.workload_name == 'sysbench':
                        # max_requests is set to an arbitrarily high value to make sure
                        # sysbench runs for the full duration even on highly
                        # performant cores.
spec.workload_parameters['max_requests'] = 10000000
spec.cluster = cluster
spec.num_cpus = num_cpus
spec.frequency = freq
spec.load(self.device, context.config.ext_loader)
spec.workload.init_resources(context)
spec.workload.validate()
new_specs.append(spec)
return new_specs
def disable_thermal_management(self):
if self.device.file_exists('/sys/class/thermal/thermal_zone0'):
tzone_paths = self.device.execute('ls /sys/class/thermal/thermal_zone*')
for tzpath in tzone_paths.strip().split():
mode_file = '{}/mode'.format(tzpath)
if self.device.file_exists(mode_file):
self.device.set_sysfile_value(mode_file, 'disabled')
def get_device_idle_states(self, cluster):
if cluster == 'big':
online_cpus = self.device.get_online_cpus(self.big_core)
else:
online_cpus = self.device.get_online_cpus(self.little_core)
idle_states = []
for cpu in online_cpus:
idle_states.extend(self.device.get_cpuidle_states(cpu))
return idle_states
def get_core_name(self, cluster):
if cluster == 'big':
return self.big_core
else:
return self.little_core
def get_cpus(self, cluster):
if cluster == 'big':
return self.big_cpus
else:
return self.little_cpus
def get_frequencies_param(self, cluster):
if cluster == 'big':
return self.big_frequencies
else:
return self.little_frequencies
def _adjust_for_thermal(data, filt_method=lambda x: x, thresh=0.9, window=5000, tdiff_threshold=10000):
n = filt_method(data)
n = n[~np.isnan(n)] # pylint: disable=no-member
d = np.diff(n) # pylint: disable=no-member
d = d[~np.isnan(d)] # pylint: disable=no-member
dmin = min(d)
dmax = max(d)
index_up = np.max((d > dmax * thresh).nonzero()) # pylint: disable=no-member
index_down = np.min((d < dmin * thresh).nonzero()) # pylint: disable=no-member
low_average = np.average(n[index_up:index_up + window]) # pylint: disable=no-member
high_average = np.average(n[index_down - window:index_down]) # pylint: disable=no-member
if low_average > high_average or index_down - index_up < tdiff_threshold:
return 0
else:
return low_average
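# Illustrative sketch only (not part of the original instrument): it shows the
# behaviour expected from _adjust_for_thermal on a synthetic trace. The numbers
# below are assumptions chosen for the example, not measured data.
def _example_adjust_for_thermal():
    # A power trace that steps up when the workload starts and back down when
    # it ends; the function should return roughly the plateau level (~5.0),
    # averaged over the window just after the upward step.
    trace = np.concatenate([np.full(6000, 1.0),
                            np.full(20000, 5.0),
                            np.full(6000, 1.0)])
    return _adjust_for_thermal(trace, window=5000, tdiff_threshold=10000)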
if __name__ == '__main__':
import sys # pylint: disable=wrong-import-position,wrong-import-order
indir, outdir = sys.argv[1], sys.argv[2]
device_name = 'odroidxu3'
big_core = 'a15'
little_core = 'a7'
first_cluster_idle_state = -1
this_dir = os.path.dirname(__file__)
report_template_file = os.path.join(this_dir, REPORT_TEMPLATE_FILE)
em_template_file = os.path.join(this_dir, EM_TEMPLATE_FILE)
freq_power_table = pd.read_csv(os.path.join(indir, FREQ_TABLE_FILE))
measured_cpus_table, cpus_table = pd.read_csv(os.path.join(indir, CPUS_TABLE_FILE), # pylint: disable=unbalanced-tuple-unpacking
header=range(2), index_col=0)
idle_power_table = pd.read_csv(os.path.join(indir, IDLE_TABLE_FILE))
if not os.path.exists(outdir):
os.makedirs(outdir)
report_file = os.path.join(outdir, 'report.html')
em_file = os.path.join(outdir, '{}_em.c'.format(device_name))
em = build_energy_model(freq_power_table, cpus_table,
idle_power_table, first_cluster_idle_state)
em_text = generate_em_c_file(em, big_core, little_core,
em_template_file, em_file)
generate_report(freq_power_table, measured_cpus_table, cpus_table,
idle_power_table, report_template_file, device_name,
em_text, report_file)
| apache-2.0 |
vlukes/sfepy | script/plot_condition_numbers.py | 4 | 5354 | #!/usr/bin/env python
"""
Plot condition numbers w.r.t. polynomial approximation order of reference
element matrices for various FE polynomial spaces (bases).
"""
from __future__ import absolute_import
import sys
sys.path.append('.')
from argparse import ArgumentParser
import numpy as nm
import matplotlib.pyplot as plt
from sfepy import data_dir
from sfepy.base.base import output, assert_
from sfepy.base.timing import Timer
from sfepy.discrete import FieldVariable, Material, Integral
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.solvers import eig
from sfepy.mechanics.matcoefs import stiffness_from_lame
helps = {
'basis' :
'name of the FE basis [default: %(default)s]',
'max_order' :
'maximum order of polynomials [default: %(default)s]',
'matrix_type' :
'matrix type, one of "elasticity", "laplace" [default: %(default)s]',
'geometry' :
'reference element geometry, one of "2_3", "2_4", "3_4", "3_8"'
' [default: %(default)s]',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-b', '--basis', metavar='name',
action='store', dest='basis',
default='lagrange', help=helps['basis'])
parser.add_argument('-n', '--max-order', metavar='order', type=int,
action='store', dest='max_order',
default=10, help=helps['max_order'])
parser.add_argument('-m', '--matrix', metavar='type',
action='store', dest='matrix_type',
default='laplace', help=helps['matrix_type'])
parser.add_argument('-g', '--geometry', metavar='name',
action='store', dest='geometry',
default='2_4', help=helps['geometry'])
options = parser.parse_args()
dim, n_ep = int(options.geometry[0]), int(options.geometry[2])
output('reference element geometry:')
output(' dimension: %d, vertices: %d' % (dim, n_ep))
n_c = {'laplace' : 1, 'elasticity' : dim}[options.matrix_type]
output('matrix type:', options.matrix_type)
output('number of variable components:', n_c)
output('polynomial space:', options.basis)
output('max. order:', options.max_order)
mesh = Mesh.from_file(data_dir + '/meshes/elements/%s_1.mesh'
% options.geometry)
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
orders = nm.arange(1, options.max_order + 1, dtype=nm.int)
conds = []
order_fix = 0 if options.geometry in ['2_4', '3_8'] else 1
for order in orders:
output('order:', order, '...')
field = Field.from_args('fu', nm.float64, n_c, omega,
approx_order=order,
space='H1', poly_space_base=options.basis)
to = field.approx_order
quad_order = 2 * (max(to - order_fix, 0))
output('quadrature order:', quad_order)
integral = Integral('i', order=quad_order)
qp, _ = integral.get_qp(options.geometry)
output('number of quadrature points:', qp.shape[0])
u = FieldVariable('u', 'unknown', field)
v = FieldVariable('v', 'test', field, primary_var_name='u')
m = Material('m', D=stiffness_from_lame(dim, 1.0, 1.0), mu=1.0)
if options.matrix_type == 'laplace':
term = Term.new('dw_laplace(m.mu, v, u)',
integral, omega, m=m, v=v, u=u)
n_zero = 1
else:
assert_(options.matrix_type == 'elasticity')
term = Term.new('dw_lin_elastic(m.D, v, u)',
integral, omega, m=m, v=v, u=u)
n_zero = (dim + 1) * dim / 2
term.setup()
output('assembling...')
timer = Timer(start=True)
mtx, iels = term.evaluate(mode='weak', diff_var='u')
output('...done in %.2f s' % timer.stop())
mtx = mtx[0, 0]
try:
assert_(nm.max(nm.abs(mtx - mtx.T)) < 1e-10)
except:
from sfepy.base.base import debug; debug()
output('matrix shape:', mtx.shape)
eigs = eig(mtx, method='eig.sgscipy', eigenvectors=False)
eigs.sort()
# Zero 'true' zeros.
eigs[:n_zero] = 0.0
ii = nm.where(eigs < 0.0)[0]
if len(ii):
output('matrix is not positive semi-definite!')
ii = nm.where(eigs[n_zero:] < 1e-12)[0]
if len(ii):
output('matrix has more than %d zero eigenvalues!' % n_zero)
output('smallest eigs:\n', eigs[:10])
ii = nm.where(eigs > 0.0)[0]
emin, emax = eigs[ii[[0, -1]]]
output('min:', emin, 'max:', emax)
cond = emax / emin
conds.append(cond)
output('condition number:', cond)
output('...done')
plt.figure(1)
plt.semilogy(orders, conds)
plt.xticks(orders, orders)
plt.xlabel('polynomial order')
plt.ylabel('condition number')
plt.grid()
plt.figure(2)
plt.loglog(orders, conds)
plt.xticks(orders, orders)
plt.xlabel('polynomial order')
plt.ylabel('condition number')
plt.grid()
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
kaichogami/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
henrykironde/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 167 | 1659 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
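# For each training fraction (1 - heldout share), the test error is averaged
# over `rounds` random train/test splits to smooth out the curves.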
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
albz/Architect | utils/python_utils/general_plot_utilities/plot_phasespace.py | 1 | 2234 | #!/usr/bin/python
######################################################################
# Name: plot_phasespace
# Author: A. Marocchino
# Date: 2017-11-02
# Purpose: plot phase space for architect
# Source: python
#####################################################################
### loading shell commands
import os, os.path, glob, sys, shutil
import time, datetime
import scipy
import numpy as np
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import pylab as pyl
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
plt.style.use(os.path.join(os.path.expanduser('~'),'Codes','Python_general_controllers','python_plot','plot_style_ppth.mplstyle'))
sys.path.append(os.path.join(os.path.expanduser('~'),'Codes/Code_Architect/Architect/utils/python_utils/architect_graphycal_unit'))
from read_architect_bin import *
### --- ###
# --- inputs --- #
path = os.getcwd()
read_architect_bin(path,'PS')
# if(len(sys.argv)<2):
# print('not enought input arguments')
# print('select bunch')
# sys.exit()
# bunch_select = int(sys.argv[1])
#
#
# --- plot --- #
fig = pyl.figure(1)
fig.set_size_inches(3.0,3.0,forward=True)
ax1 = pyl.subplot(111)
select_bunch = (np.asarray(var.bunch_id)==1)
select_dcut = (np.asarray(var.dcut)==1.0)
selected = np.asarray(select_bunch) & np.asarray(select_dcut)
z0 = np.mean(var.z[selected])
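# Centre the longitudinal axis on the mean z of the driver bunch so both
# bunches are plotted relative to the driver position.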
ax1.plot(var.z[selected]-z0,-var.pz[selected], '.', markersize=0.2, lw=1, label=r"Driver")
select_bunch = (np.asarray(var.bunch_id)==2)
select_dcut = (np.asarray(var.dcut)==1.0)
selected = np.asarray(select_bunch) & np.asarray(select_dcut)
ax1.plot(var.z[selected]-z0,-(var.pz[selected]-200.), '.', markersize=0.2, lw=1, label=r"Tr. bunch")
#--- labels ---#
ax1.set_xlabel(r'Z ($\mu$m)')
ax1.set_ylabel(r'$\beta \gamma$')
#
#
# ax1.legend(loc=9, ncol=2, prop={'size':7.5})
# ax1.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),fancybox=True, shadow=True, ncol=3)
# # pyl.subplots_adjust(bottom=0.34,left=0.210)
#
# # ax1.xaxis.set_major_locator(MultipleLocator(1.))
# # ax1.xaxis.set_minor_locator(MultipleLocator(.5))
#
pyl.savefig(os.path.join(path,'im_longitudinal_phasespace.pdf'), format='pdf')
plt.show()
| gpl-3.0 |
arokem/scipy | scipy/special/_precompute/struve_convergence.py | 17 | 3498 | """
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import mpmath
def err_metric(a, b, atol=1e-290):
m = abs(a - b) / (atol + abs(b))
m[np.isinf(b) & (a == b)] = 0
return m
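# err_metric is a relative-error measure, |a - b| / (atol + |b|), where matching
# infinities count as zero error. For example,
# err_metric(np.array([1.0 + 1e-13]), np.array([1.0])) is roughly 1e-13.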
def do_plot(is_h=True):
from scipy.special._ufuncs import (_struve_power_series,
_struve_asymp_large_z,
_struve_bessel_series)
vs = np.linspace(-1000, 1000, 91)
zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])
rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)
mpmath.mp.dps = 50
if is_h:
sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
else:
sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])
err_a = err_metric(ra[0], ex) + 1e-300
err_p = err_metric(rp[0], ex) + 1e-300
err_b = err_metric(rb[0], ex) + 1e-300
err_est_a = abs(ra[1]/ra[0])
err_est_p = abs(rp[1]/rp[0])
err_est_b = abs(rb[1]/rb[0])
z_cutoff = 0.7*abs(vs) + 12
levels = [-1000, -12]
plt.cla()
plt.hold(1)
plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)
plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])
lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])
plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})
plt.plot(vs, z_cutoff, 'k--')
plt.xlim(vs.min(), vs.max())
plt.ylim(zs.min(), zs.max())
plt.xlabel('v')
plt.ylabel('z')
def main():
plt.clf()
plt.subplot(121)
do_plot(True)
plt.title('Struve H')
plt.subplot(122)
do_plot(False)
plt.title('Struve L')
plt.savefig('struve_convergence.png')
plt.show()
if __name__ == "__main__":
main()
| bsd-3-clause |
mikehankey/fireball_camera | test-install.py | 1 | 18395 | from __future__ import print_function
import os
import subprocess
import sys
import ephem
import math
from amscommon import read_config
from amscommon import write_config
from amscommon import put_device_info
from collections import defaultdict
from PIL import Image, ImageChops
import numpy as np
from pathlib import Path
import requests
import cv2
import os
import time
import datetime
import sys
from collections import deque
import iproc
from amscommon import read_sun, read_config
import sys
import time
import os
import requests
from amscommon import read_config
import os
import os
import requests, json
import sys
import netifaces
import os
import settings
from amscommon import read_config, write_config, put_device_info
from collections import defaultdict
import os
import sys
import subprocess
import time
import os
import requests
from amscommon import read_config
from pathlib import Path
import subprocess
import requests
import pytesseract
from io import BytesIO
from pathlib import Path
import glob
import collections
from collections import deque
from PIL import Image, ImageChops
from queue import Queue
import multiprocessing
import datetime
import cv2
import numpy as np
import iproc
import time
import ephem
import sys
import os
import settings
from amscommon import read_config, caldate
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageTk
from PIL import ImageEnhance
import subprocess
import requests
import pytesseract
from io import BytesIO
from pathlib import Path
import glob
import collections
from collections import deque
from PIL import Image, ImageChops
from queue import Queue
import multiprocessing
import datetime
import cv2
import numpy as np
import iproc
import time
import ephem
import sys
import os
import settings
from amscommon import read_config, caldate
import settings
import sys
import os
import time as gtime
from os import listdir
from os.path import isfile, join
import json, requests
import numpy as np
from datetime import datetime, date, time
from dateutil import parser
from amscommon import read_config
#from math import radians, cos, sin, asin, sqrt
from math import *
import requests
import settings
import sys
import datetime
from datetime import timedelta
from amscommon import read_config
import glob
from math import *
import settings
import sys
import os
import time as gtime
from os import listdir
from os.path import isfile, join
import json, requests
import numpy as np
from datetime import datetime, date, time
from dateutil import parser
import sys
import os
import settings
from crypt import Crypt
import requests
import mimetypes
import sys
import netifaces
import settings
from collections import deque
from PIL import Image, ImageChops
from queue import Queue
import multiprocessing
import datetime
import cv2
import numpy as np
import iproc
import time
import sys
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from sklearn.cluster import KMeans
from sklearn import datasets
from PIL import Image, ImageChops
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from random import randint
import time
import ephem
from PIL import Image
import cv2
import glob
import sys
import os
import numpy as np
import datetime
from pathlib import Path
import subprocess
from amscommon import read_config
import math
import time
from sklearn.cluster import Birch
from collections import deque
import matplotlib.pyplot as plt
import os
from astropy.io import fits
from astropy.wcs import WCS
from astropy import units as u
#import collections
#from collections import deque
from PIL import Image, ImageChops
#from queue import Queue
#import multiprocessing
#import datetime
import cv2
import numpy as np
#import iproc
import time
#import ephem
import sys
#import os
import subprocess
import numpy as np
from pathlib import Path
import requests
import cv2
import os
import time
import datetime
import sys
from collections import deque
import iproc
from amscommon import read_sun, read_config
from collections import defaultdict
import os
import time
from amscommon import read_config
import netifaces
import cv2
import numpy as np
import sys
import os
import time
import sys
import os
import subprocess
import cv2
import subprocess
import time
from collections import defaultdict
from amscommon import read_config, read_sun
import requests
from urllib.request import urlretrieve
import re
# important backdoor URLS
import time
import requests
import sys
import os
from amscommon import read_config
import sys
import time
from collections import defaultdict
from amscommon import read_config, read_sun
import os
import requests
from urllib.request import urlretrieve
# important backdoor URLS
import datetime
import sys
import requests
from amscommon import read_config
import math
import sys
#from subprocess import call
from pathlib import Path
import os
import requests
from collections import deque
#from queue import Queue
import multiprocessing
import datetime
import cv2
import numpy as np
import iproc
import time
import syslog
import sys
#from subprocess import call
import numpy as np
from pathlib import Path
import os
import requests
from collections import deque
#from queue import Queue
import multiprocessing
#from multiprocessing import Process, Manager
from amscommon import read_config
import datetime
import cv2
import iproc
import time
import syslog
import sys
#from subprocess import call
import os
import requests
from collections import deque
#from queue import Queue
import multiprocessing
import datetime
import cv2
import numpy as np
import iproc
import time
import syslog
import sys
#from subprocess import call
import os
import requests
from collections import deque
#from queue import Queue
import multiprocessing
import datetime
import cv2
import numpy as np
import iproc
import time
import syslog
import sys
#from subprocess import call
import os
import requests
from collections import deque
#from queue import Queue
import multiprocessing
import datetime
import cv2
import numpy as np
import iproc
import time
import syslog
import sys
import sys
from functools import reduce
import numpy as np
import cv2
import os
import itertools as it
from contextlib import contextmanager
import cv2
import numpy as np
import sys
import os
import time
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from sklearn.cluster import KMeans
from sklearn import datasets
from PIL import Image, ImageChops
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from random import randint
import time
import ephem
from PIL import Image
import cv2
import glob
import sys
import os
import numpy as np
import datetime
from pathlib import Path
import subprocess
from amscommon import read_config
import math
import time
from sklearn.cluster import Birch
from collections import deque
import dropbox
import sys
import cv2
import sys
import glob
import time
#import collections
#from collections import deque
from PIL import Image, ImageChops
#from queue import Queue
#import multiprocessing
#import datetime
import cv2
import numpy as np
#import iproc
import time
#import ephem
import sys
#import os
from astride import Streak
from astropy.io import fits
from PIL import Image, ImageChops
import time
import multiprocessing
import cv2
import sys
import numpy as np
from astride import Streak
from astropy.io import fits
from PIL import Image, ImageChops
import time
import multiprocessing
import cv2
import sys
import numpy as np
import warnings
import glob
import sys
import subprocess
import os
import time
import numpy as np
import cv2
from common import splitfn
import os
import sys
import getopt
from glob import glob
import numpy as np
import cv2
import glob
import os
import sys
import time
import os
import requests
import re
import math
import fitsio
import sys
from amscommon import read_config
import subprocess
import os
import numpy as np
#from tkinter import *
from PIL import Image
from PIL import ImageDraw
from PIL import ImageTk
from PIL import ImageEnhance
#from tkinter.filedialog import askopenfilename
#from tkinter.ttk import *
import cv2
import matplotlib.pyplot as plt
import os
from astropy.io import fits
from astropy.wcs import WCS
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
import numpy as np
import scipy.interpolate
import cv2
import subprocess
import os
import numpy as np
from tkinter import *
from PIL import Image
from PIL import ImageDraw
from PIL import ImageTk
from PIL import ImageEnhance
from tkinter.filedialog import askopenfilename
from tkinter.ttk import *
import cv2
import os
import numpy as np
from tkinter import *
from PIL import Image
from PIL import ImageDraw
from PIL import ImageTk
from PIL import ImageEnhance
from tkinter.filedialog import askopenfilename
import cv2
import datetime
import cv2
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from sklearn import datasets
import requests
import sys
from amscommon import read_config
import settings
import MFTCalibration as MFTC
import MyDialog as MD
import tkinter as tk
from tkinter import ttk
import subprocess
import os
import numpy as np
from PIL import Image
from PIL import ImageDraw
from PIL import ImageTk
from PIL import ImageEnhance
from tkinter.filedialog import askopenfilename
#import tkSimpleDialog as tks
import cv2
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import time
from sklearn.cluster import KMeans
from sklearn import datasets
from PIL import Image, ImageChops
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from random import randint
import time
import ephem
from PIL import Image
import cv2
import glob
import sys
import os
import numpy as np
import datetime
from pathlib import Path
import subprocess
from amscommon import read_config
import math
import time
from sklearn.cluster import Birch
from collections import deque
import re
import fitsio
import sys
from PIL import ImageFont
import subprocess
import os
import numpy as np
#from tkinter import *
from PIL import Image
from PIL import ImageDraw
from PIL import ImageTk
from PIL import ImageEnhance
#from tkinter.filedialog import askopenfilename
#from tkinter.ttk import *
import cv2
import matplotlib.pyplot as plt
import os
from astropy.io import fits
from astropy.wcs import WCS
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
import brightstardata as bsd
import MFTCalibration as MFTC
import MyDialog as MD
from amscommon import read_config
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
import subprocess
import os
import numpy as np
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageTk
from PIL import ImageEnhance
from tkinter.filedialog import askopenfilename
#import tkSimpleDialog as tks
import cv2
import requests, json
import sys
import netifaces
import os
import settings
from amscommon import read_config, write_config, put_device_info
import requests, json
import sys
import netifaces
import os
import os.path
import settings
from datetime import datetime
#from config_func import add_to_config, read_config, get_device_info_and_config
import time
import glob
import ephem
import subprocess
from pathlib import Path
import os
from amscommon import read_config
from tkinter import *
#import RPi.GPIO as GPIO
import time
import sys
import cv2
import iproc
import time
import sys
import numpy as np
from collections import deque
from random import randint
from skimage import morphology
import subprocess
import requests
import pytesseract
from io import BytesIO
from pathlib import Path
import glob
import collections
from collections import deque
from PIL import Image, ImageChops
from queue import Queue
import multiprocessing
import datetime
import cv2
import numpy as np
import iproc
import time
import ephem
import sys
import os
import settings
import math
from amscommon import read_config, caldate
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageTk
from PIL import ImageEnhance
import glob
import ephem
from pathlib import Path
import sys
import ProcessVideo as PV
import time
import os
import subprocess
import time
from amscommon import read_config
import json
from amscommon import read_config
import fitsio
import sys
import numpy as np
from pathlib import Path
import os
import requests
from collections import deque
import multiprocessing
from amscommon import read_config
import datetime
import cv2
import iproc
import time
import syslog
import sys
# import the necessary packages
#from imutils.video import FileVideoStream
#from imutils.video import FPS
import numpy as np
import argparse
#import imutils
import time
import cv2
import os
import subprocess
import sys
import ephem
import math
from amscommon import read_config
from amscommon import write_config
from amscommon import put_device_info
from PIL import Image
import cv2
import glob
import sys
import numpy as np
import os
from amscommon import read_config
import MFTCalibration as MFTC
import MyDialog as MD
import tkinter as tk
from tkinter import ttk
import subprocess
import os
import numpy as np
from PIL import Image
from PIL import ImageDraw
from PIL import ImageTk
from PIL import ImageEnhance
from tkinter.filedialog import askopenfilename
from tkinter import simpledialog as tks
import cv2
import glob
import datetime
import os
from amscommon import read_config
import time
import os
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from sklearn.cluster import KMeans
from sklearn import datasets
from PIL import Image, ImageChops
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from random import randint
import time
import ephem
from PIL import Image
import cv2
import glob
import sys
import os
import numpy as np
import datetime
from pathlib import Path
import subprocess
from amscommon import read_config
import math
import time
from sklearn.cluster import Birch
from collections import deque
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from sklearn.cluster import KMeans
from sklearn import datasets
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from random import randint
import time
import ephem
from PIL import Image
import cv2
import glob
import sys
import os
import numpy as np
import datetime
from pathlib import Path
import subprocess
from amscommon import read_config
import math
import time
from sklearn.cluster import Birch
from collections import deque
import requests
from amscommon import read_config
import sys
import requests
import sys
from amscommon import read_config
import numpy as np
from pathlib import Path
import requests
import cv2
import os
import time
import datetime
import sys
from collections import deque
import iproc
from amscommon import read_sun, read_config
from collections import deque
from queue import Queue
import multiprocessing
import datetime
import cv2
import numpy as np
import iproc
import time
# import the necessary packages
import numpy as np
import cv2
import sys
from amscommon import read_config
import glob
import os
import glob
import errno
import os
import ephem
from amscommon import read_config
import subprocess
import glob
import os
from math import atan, tan
import numpy as np
from numpy import ones,vstack
from numpy.linalg import lstsq, solve
import ephem
import time
import ephem
import datetime
import sys
import glob
import subprocess
import glob
import os
from pathlib import Path
import sys
#import Adafruit_DHT
import os
import glob
import time
from view import log_fireball_event
from amscommon import read_config, caldate
from view import log_motion_capture
from amscommon import read_config, caldate
from PIL import Image
import cv2
import glob
import sys
import numpy as np
import datetime
import os
import time
from pathlib import Path
from amscommon import read_config
import os
import glob
import time
from time import strftime
import datetime
import glob
import sys
import ProcessVideo as PV
import time
import os
import subprocess
import time
import sys
import cv2
import sys
import numpy as np
import os
from dateutil import parser
import requests
import os
import mimetypes
import sys
import datetime
import time
import settings
from amscommon import read_config
import requests
import mimetypes
import sys
import settings
from pathlib import Path
from amscommon import read_config, write_config
import requests
import os
import mimetypes
import sys
import datetime
import time
import settings
from amscommon import read_config
import requests
import os
import mimetypes
import sys
import datetime
import time
import settings
from amscommon import read_config
import requests
import os
import mimetypes
import sys
import datetime
import time
import settings
from amscommon import read_config
from PIL import Image
import cv2
import glob
import sys
import numpy as np
import datetime
import subprocess
import requests
import pytesseract
from io import BytesIO
from pathlib import Path
import glob
import collections
from collections import deque
from PIL import Image, ImageChops
from queue import Queue
import multiprocessing
import datetime
import cv2
import numpy as np
import iproc
import time
import ephem
import sys
import os
from amscommon import read_config, caldate
#from wand.image import Image
#from wand.display import display
import subprocess
import requests
import pytesseract
from io import BytesIO
from pathlib import Path
import glob
import collections
from collections import deque
from PIL import Image, ImageChops
from queue import Queue
import multiprocessing
import datetime
import cv2
import numpy as np
import iproc
import time
import ephem
import sys
import os
import settings
from amscommon import read_config, caldate
import cv2
import time
from collections import deque
import subprocess
import datetime
import os
from amscommon import read_config
import time
| gpl-3.0 |
jkbradley/spark | examples/src/main/python/sql/arrow.py | 3 | 9491 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Arrow in Spark.
Run with:
./bin/spark-submit examples/src/main/python/sql/arrow.py
"""
from __future__ import print_function
from pyspark.sql import SparkSession
from pyspark.sql.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
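# Both checks are expected to raise an ImportError up front if pandas / PyArrow
# are missing or older than the minimum supported versions, rather than failing
# later inside the examples.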
def dataframe_with_arrow_example(spark):
# $example on:dataframe_with_arrow$
import numpy as np
import pandas as pd
# Enable Arrow-based columnar data transfers
spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
# Generate a Pandas DataFrame
pdf = pd.DataFrame(np.random.rand(100, 3))
# Create a Spark DataFrame from a Pandas DataFrame using Arrow
df = spark.createDataFrame(pdf)
# Convert the Spark DataFrame back to a Pandas DataFrame using Arrow
result_pdf = df.select("*").toPandas()
# $example off:dataframe_with_arrow$
print("Pandas DataFrame result statistics:\n%s\n" % str(result_pdf.describe()))
def scalar_pandas_udf_example(spark):
# $example on:scalar_pandas_udf$
import pandas as pd
from pyspark.sql.functions import col, pandas_udf
from pyspark.sql.types import LongType
# Declare the function and create the UDF
def multiply_func(a, b):
return a * b
multiply = pandas_udf(multiply_func, returnType=LongType())
# The function for a pandas_udf should be able to execute with local Pandas data
x = pd.Series([1, 2, 3])
print(multiply_func(x, x))
# 0 1
# 1 4
# 2 9
# dtype: int64
# Create a Spark DataFrame, 'spark' is an existing SparkSession
df = spark.createDataFrame(pd.DataFrame(x, columns=["x"]))
# Execute function as a Spark vectorized UDF
df.select(multiply(col("x"), col("x"))).show()
# +-------------------+
# |multiply_func(x, x)|
# +-------------------+
# | 1|
# | 4|
# | 9|
# +-------------------+
# $example off:scalar_pandas_udf$
def scalar_iter_pandas_udf_example(spark):
# $example on:scalar_iter_pandas_udf$
import pandas as pd
from pyspark.sql.functions import col, pandas_udf, struct, PandasUDFType
pdf = pd.DataFrame([1, 2, 3], columns=["x"])
df = spark.createDataFrame(pdf)
# When the UDF is called with a single column that is not StructType,
# the input to the underlying function is an iterator of pd.Series.
@pandas_udf("long", PandasUDFType.SCALAR_ITER)
def plus_one(batch_iter):
for x in batch_iter:
yield x + 1
df.select(plus_one(col("x"))).show()
# +-----------+
# |plus_one(x)|
# +-----------+
# | 2|
# | 3|
# | 4|
# +-----------+
# When the UDF is called with more than one columns,
# the input to the underlying function is an iterator of pd.Series tuple.
@pandas_udf("long", PandasUDFType.SCALAR_ITER)
def multiply_two_cols(batch_iter):
for a, b in batch_iter:
yield a * b
df.select(multiply_two_cols(col("x"), col("x"))).show()
# +-----------------------+
# |multiply_two_cols(x, x)|
# +-----------------------+
# | 1|
# | 4|
# | 9|
# +-----------------------+
# When the UDF is called with a single column that is StructType,
# the input to the underlying function is an iterator of pd.DataFrame.
@pandas_udf("long", PandasUDFType.SCALAR_ITER)
def multiply_two_nested_cols(pdf_iter):
for pdf in pdf_iter:
yield pdf["a"] * pdf["b"]
df.select(
multiply_two_nested_cols(
struct(col("x").alias("a"), col("x").alias("b"))
).alias("y")
).show()
# +---+
# | y|
# +---+
# | 1|
# | 4|
# | 9|
# +---+
# In the UDF, you can initialize some states before processing batches.
# Wrap your code with try/finally or use context managers to ensure
# the release of resources at the end.
y_bc = spark.sparkContext.broadcast(1)
@pandas_udf("long", PandasUDFType.SCALAR_ITER)
def plus_y(batch_iter):
y = y_bc.value # initialize states
try:
for x in batch_iter:
yield x + y
finally:
pass # release resources here, if any
df.select(plus_y(col("x"))).show()
# +---------+
# |plus_y(x)|
# +---------+
# | 2|
# | 3|
# | 4|
# +---------+
# $example off:scalar_iter_pandas_udf$
def grouped_map_pandas_udf_example(spark):
# $example on:grouped_map_pandas_udf$
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = spark.createDataFrame(
[(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
@pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)
def subtract_mean(pdf):
# pdf is a pandas.DataFrame
v = pdf.v
return pdf.assign(v=v - v.mean())
df.groupby("id").apply(subtract_mean).show()
# +---+----+
# | id| v|
# +---+----+
# | 1|-0.5|
# | 1| 0.5|
# | 2|-3.0|
# | 2|-1.0|
# | 2| 4.0|
# +---+----+
# $example off:grouped_map_pandas_udf$
def grouped_agg_pandas_udf_example(spark):
# $example on:grouped_agg_pandas_udf$
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql import Window
df = spark.createDataFrame(
[(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
@pandas_udf("double", PandasUDFType.GROUPED_AGG)
def mean_udf(v):
return v.mean()
df.groupby("id").agg(mean_udf(df['v'])).show()
# +---+-----------+
# | id|mean_udf(v)|
# +---+-----------+
# | 1| 1.5|
# | 2| 6.0|
# +---+-----------+
w = Window \
.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
df.withColumn('mean_v', mean_udf(df['v']).over(w)).show()
# +---+----+------+
# | id| v|mean_v|
# +---+----+------+
# | 1| 1.0| 1.5|
# | 1| 2.0| 1.5|
# | 2| 3.0| 6.0|
# | 2| 5.0| 6.0|
# | 2|10.0| 6.0|
# +---+----+------+
# $example off:grouped_agg_pandas_udf$
def map_iter_pandas_udf_example(spark):
# $example on:map_iter_pandas_udf$
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))
@pandas_udf(df.schema, PandasUDFType.MAP_ITER)
def filter_func(batch_iter):
for pdf in batch_iter:
yield pdf[pdf.id == 1]
df.mapInPandas(filter_func).show()
# +---+---+
# | id|age|
# +---+---+
# | 1| 21|
# +---+---+
# $example off:map_iter_pandas_udf$
def cogrouped_map_pandas_udf_example(spark):
# $example on:cogrouped_map_pandas_udf$
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType
df1 = spark.createDataFrame(
[(20000101, 1, 1.0), (20000101, 2, 2.0), (20000102, 1, 3.0), (20000102, 2, 4.0)],
("time", "id", "v1"))
df2 = spark.createDataFrame(
[(20000101, 1, "x"), (20000101, 2, "y")],
("time", "id", "v2"))
@pandas_udf("time int, id int, v1 double, v2 string", PandasUDFType.COGROUPED_MAP)
def asof_join(l, r):
return pd.merge_asof(l, r, on="time", by="id")
df1.groupby("id").cogroup(df2.groupby("id")).apply(asof_join).show()
# +--------+---+---+---+
# | time| id| v1| v2|
# +--------+---+---+---+
# |20000101| 1|1.0| x|
# |20000102| 1|3.0| x|
# |20000101| 2|2.0| y|
# |20000102| 2|4.0| y|
# +--------+---+---+---+
# $example off:cogrouped_map_pandas_udf$
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("Python Arrow-in-Spark example") \
.getOrCreate()
print("Running Pandas to/from conversion example")
dataframe_with_arrow_example(spark)
print("Running pandas_udf scalar example")
scalar_pandas_udf_example(spark)
print("Running pandas_udf scalar iterator example")
scalar_iter_pandas_udf_example(spark)
print("Running pandas_udf grouped map example")
grouped_map_pandas_udf_example(spark)
print("Running pandas_udf grouped agg example")
grouped_agg_pandas_udf_example(spark)
print("Running pandas_udf map iterator example")
map_iter_pandas_udf_example(spark)
print("Running pandas_udf cogrouped map example")
cogrouped_map_pandas_udf_example(spark)
spark.stop()
| apache-2.0 |