repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes) |
---|---|---|---|---|---|
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/core/indexing.py | 3 | 73208 | # pylint: disable=W0223
import warnings
import numpy as np
from pandas.compat import range, zip
import pandas.compat as compat
from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries
from pandas.core.dtypes.common import (
is_integer_dtype,
is_integer, is_float,
is_list_like,
is_sequence,
is_iterator,
is_scalar,
is_sparse,
_is_unorderable_exception,
_ensure_platform_int)
from pandas.core.dtypes.missing import isnull, _infer_fill_value
from pandas.core.index import Index, MultiIndex
import pandas.core.common as com
from pandas.core.common import (is_bool_indexer, _asarray_tuplesafe,
is_null_slice, is_full_slice,
_values_from_object)
# the supported indexers
def get_indexers_list():
return [
('ix', _IXIndexer),
('iloc', _iLocIndexer),
('loc', _LocIndexer),
('at', _AtIndexer),
('iat', _iAtIndexer),
]
# "null slice"
_NS = slice(None, None)
# the public IndexSlicerMaker
class _IndexSlice(object):
"""
Create an object to more easily perform multi-index slicing
Examples
--------
>>> midx = pd.MultiIndex.from_product([['A0','A1'], ['B0','B1','B2','B3']])
>>> columns = ['foo', 'bar']
>>> dfmi = pd.DataFrame(np.arange(16).reshape((len(midx), len(columns))),
index=midx, columns=columns)
Using the default slice command:
>>> dfmi.loc[(slice(None), slice('B0', 'B1')), :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
Using the IndexSlice class for a more intuitive command:
>>> idx = pd.IndexSlice
>>> dfmi.loc[idx[:, 'B0':'B1'], :]
foo bar
A0 B0 0 1
B1 2 3
A1 B0 8 9
B1 10 11
"""
def __getitem__(self, arg):
return arg
IndexSlice = _IndexSlice()
class IndexingError(Exception):
pass
class _NDFrameIndexer(object):
_valid_types = None
_exception = KeyError
axis = None
def __init__(self, obj, name):
self.obj = obj
self.ndim = obj.ndim
self.name = name
def __call__(self, axis=None):
# we need to return a copy of ourselves
new_self = self.__class__(self.obj, self.name)
new_self.axis = axis
return new_self
def __iter__(self):
raise NotImplementedError('ix is not iterable')
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
try:
values = self.obj.get_value(*key)
if is_scalar(values):
return values
except Exception:
pass
return self._getitem_tuple(key)
else:
key = com._apply_if_callable(key, self.obj)
return self._getitem_axis(key, axis=0)
def _get_label(self, label, axis=0):
if self.ndim == 1:
# for perf reasons we want to try _xs first
# as it's basically direct indexing
# but will fail when the index is not present
# see GH5667
try:
return self.obj._xs(label, axis=axis)
except:
return self.obj[label]
elif isinstance(label, tuple) and isinstance(label[axis], slice):
raise IndexingError('no slices here, handle elsewhere')
return self.obj._xs(label, axis=axis)
def _get_loc(self, key, axis=0):
return self.obj._ixs(key, axis=axis)
def _slice(self, obj, axis=0, kind=None):
return self.obj._slice(obj, axis=axis, kind=kind)
def _get_setitem_indexer(self, key):
if self.axis is not None:
return self._convert_tuple(key, is_setter=True)
axis = self.obj._get_axis(0)
if isinstance(axis, MultiIndex):
try:
return axis.get_loc(key)
except Exception:
pass
if isinstance(key, tuple):
try:
return self._convert_tuple(key, is_setter=True)
except IndexingError:
pass
if isinstance(key, range):
return self._convert_range(key, is_setter=True)
try:
return self._convert_to_indexer(key, is_setter=True)
except TypeError as e:
# invalid indexer type vs 'other' indexing errors
if 'cannot do' in str(e):
raise
raise IndexingError(key)
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
else:
key = com._apply_if_callable(key, self.obj)
indexer = self._get_setitem_indexer(key)
self._setitem_with_indexer(indexer, value)
def _has_valid_type(self, k, axis):
raise NotImplementedError()
def _has_valid_tuple(self, key):
""" check the key for valid keys across my indexer """
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if not self._has_valid_type(k, i):
raise ValueError("Location based indexing can only have [%s] "
"types" % self._valid_types)
def _should_validate_iterable(self, axis=0):
""" return a boolean whether this axes needs validation for a passed
iterable
"""
ax = self.obj._get_axis(axis)
if isinstance(ax, MultiIndex):
return False
elif ax.is_floating():
return False
return True
def _is_nested_tuple_indexer(self, tup):
if any([isinstance(ax, MultiIndex) for ax in self.obj.axes]):
return any([is_nested_tuple(tup, ax) for ax in self.obj.axes])
return False
def _convert_tuple(self, key, is_setter=False):
keyidx = []
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
for i in range(self.ndim):
if i == axis:
keyidx.append(self._convert_to_indexer(
key, axis=axis, is_setter=is_setter))
else:
keyidx.append(slice(None))
else:
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
idx = self._convert_to_indexer(k, axis=i, is_setter=is_setter)
keyidx.append(idx)
return tuple(keyidx)
def _convert_range(self, key, is_setter=False):
""" convert a range argument """
return list(key)
def _convert_scalar_indexer(self, key, axis):
# if we are accessing via lowered dim, use the last dim
ax = self.obj._get_axis(min(axis, self.ndim - 1))
# a scalar
return ax._convert_scalar_indexer(key, kind=self.name)
def _convert_slice_indexer(self, key, axis):
# if we are accessing via lowered dim, use the last dim
ax = self.obj._get_axis(min(axis, self.ndim - 1))
return ax._convert_slice_indexer(key, kind=self.name)
def _has_valid_setitem_indexer(self, indexer):
return True
def _has_valid_positional_setitem_indexer(self, indexer):
""" validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally
"""
if isinstance(indexer, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
else:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
for ax, i in zip(self.obj.axes, indexer):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like_indexer(i):
# should check the elements?
pass
elif is_integer(i):
if i >= len(ax):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
elif isinstance(i, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
return True
def _setitem_with_indexer(self, indexer, value):
self._has_valid_setitem_indexer(indexer)
# also has the side effect of consolidating in-place
# TODO: Panel, DataFrame are not imported, remove?
from pandas import Panel, DataFrame, Series # noqa
info_axis = self.obj._info_axis_number
# maybe partial set
take_split_path = self.obj._is_mixed_type
# if there is only one block/type, still have to take split path
# unless the block is one-dimensional or it can hold the value
if not take_split_path and self.obj._data.blocks:
blk, = self.obj._data.blocks
if 1 < blk.ndim: # in case of dict, keys are indices
val = list(value.values()) if isinstance(value,
dict) else value
take_split_path = not blk._can_hold_element(val)
if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes):
for i, ax in zip(indexer, self.obj.axes):
# if we have any multi-indexes that have non-trivial slices
# (not null slices) then we must take the split path, xref
# GH 10360
if (isinstance(ax, MultiIndex) and
not (is_integer(i) or is_null_slice(i))):
take_split_path = True
break
if isinstance(indexer, tuple):
nindexer = []
for i, idx in enumerate(indexer):
if isinstance(idx, dict):
# reindex the axis to the new value
# and set inplace
key, _ = convert_missing_indexer(idx)
# if this is the items axes, then take the main missing
# path first
# this correctly sets the dtype and avoids cache issues
# essentially this separates out the block that is needed
# to possibly be modified
if self.ndim > 1 and i == self.obj._info_axis_number:
# add the new item, and set the value
# must have all defined axes if we have a scalar
# or a list-like on the non-info axes if we have a
# list-like
len_non_info_axes = [
len(_ax) for _i, _ax in enumerate(self.obj.axes)
if _i != i
]
if any([not l for l in len_non_info_axes]):
if not is_list_like_indexer(value):
raise ValueError("cannot set a frame with no "
"defined index and a scalar")
self.obj[key] = value
return self.obj
# add a new item with the dtype setup
self.obj[key] = _infer_fill_value(value)
new_indexer = convert_from_missing_indexer_tuple(
indexer, self.obj.axes)
self._setitem_with_indexer(new_indexer, value)
return self.obj
# reindex the axis
# make sure to clear the cache because we are
# just replacing the block manager here
# so the object is the same
index = self.obj._get_axis(i)
labels = index.insert(len(index), key)
self.obj._data = self.obj.reindex_axis(labels, i)._data
self.obj._maybe_update_cacher(clear=True)
self.obj.is_copy = None
nindexer.append(labels.get_loc(key))
else:
nindexer.append(idx)
indexer = tuple(nindexer)
else:
indexer, missing = convert_missing_indexer(indexer)
if missing:
# reindex the axis to the new value
# and set inplace
if self.ndim == 1:
index = self.obj.index
new_index = index.insert(len(index), indexer)
# we have a coerced indexer, e.g. a float
# that matches in an Int64Index, so
# we will not create a duplicate index, rather
# index to that element
# e.g. 0.0 -> 0
# GH12246
if index.is_unique:
new_indexer = index.get_indexer([new_index[-1]])
if (new_indexer != -1).any():
return self._setitem_with_indexer(new_indexer,
value)
# this preserves dtype of the value
new_values = Series([value])._values
if len(self.obj._values):
try:
new_values = np.concatenate([self.obj._values,
new_values])
except TypeError:
new_values = np.concatenate([self.obj.asobject,
new_values])
self.obj._data = self.obj._constructor(
new_values, index=new_index, name=self.obj.name)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
elif self.ndim == 2:
# no columns and scalar
if not len(self.obj.columns):
raise ValueError("cannot set a frame with no defined "
"columns")
# append a Series
if isinstance(value, Series):
value = value.reindex(index=self.obj.columns,
copy=True)
value.name = indexer
# a list-like
else:
# must have conforming columns
if is_list_like_indexer(value):
if len(value) != len(self.obj.columns):
raise ValueError("cannot set a row with "
"mismatched columns")
value = Series(value, index=self.obj.columns,
name=indexer)
self.obj._data = self.obj.append(value)._data
self.obj._maybe_update_cacher(clear=True)
return self.obj
# set using setitem (Panel and > dims)
elif self.ndim >= 3:
return self.obj.__setitem__(indexer, value)
# set
item_labels = self.obj._get_axis(info_axis)
# align and set the values
if take_split_path:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
if isinstance(value, ABCSeries):
value = self._align_series(indexer, value)
info_idx = indexer[info_axis]
if is_integer(info_idx):
info_idx = [info_idx]
labels = item_labels[info_idx]
# if we have a partial multiindex, then need to adjust the plane
# indexer here
if (len(labels) == 1 and
isinstance(self.obj[labels[0]].axes[0], MultiIndex)):
item = labels[0]
obj = self.obj[item]
index = obj.index
idx = indexer[:info_axis][0]
plane_indexer = tuple([idx]) + indexer[info_axis + 1:]
lplane_indexer = length_of_indexer(plane_indexer[0], index)
# require that we are setting the right number of values that
# we are indexing
if is_list_like_indexer(value) and np.iterable(
value) and lplane_indexer != len(value):
if len(obj[idx]) != len(value):
raise ValueError("cannot set using a multi-index "
"selection indexer with a different "
"length than the value")
# make sure we have an ndarray
value = getattr(value, 'values', value).ravel()
# we can directly set the series here
# as we select a slice indexer on the mi
idx = index._convert_slice_indexer(idx)
obj._consolidate_inplace()
obj = obj.copy()
obj._data = obj._data.setitem(indexer=tuple([idx]),
value=value)
self.obj[item] = obj
return
# non-mi
else:
plane_indexer = indexer[:info_axis] + indexer[info_axis + 1:]
if info_axis > 0:
plane_axis = self.obj.axes[:info_axis][0]
lplane_indexer = length_of_indexer(plane_indexer[0],
plane_axis)
else:
lplane_indexer = 0
def setter(item, v):
s = self.obj[item]
pi = plane_indexer[0] if lplane_indexer == 1 else plane_indexer
# perform the equivalent of a setitem on the info axis
# as we have a null slice or a slice with full bounds
# which means essentially reassign to the columns of a
# multi-dim object
# GH6149 (null slice), GH10408 (full bounds)
if (isinstance(pi, tuple) and
all(is_null_slice(idx) or
is_full_slice(idx, len(self.obj))
for idx in pi)):
s = v
else:
# set the item, possibly having a dtype change
s._consolidate_inplace()
s = s.copy()
s._data = s._data.setitem(indexer=pi, value=v)
s._maybe_update_cacher(clear=True)
# reset the sliced object if unique
self.obj[item] = s
def can_do_equal_len():
""" return True if we have an equal len settable """
if not len(labels) == 1 or not np.iterable(value):
return False
l = len(value)
item = labels[0]
index = self.obj[item].index
# equal len list/ndarray
if len(index) == l:
return True
elif lplane_indexer == l:
return True
return False
# we need an iterable, with a ndim of at least 1
# eg. don't pass through np.array(0)
if is_list_like_indexer(value) and getattr(value, 'ndim', 1) > 0:
# we have an equal len Frame
if isinstance(value, ABCDataFrame) and value.ndim > 1:
sub_indexer = list(indexer)
multiindex_indexer = isinstance(labels, MultiIndex)
for item in labels:
if item in value:
sub_indexer[info_axis] = item
v = self._align_series(
tuple(sub_indexer), value[item],
multiindex_indexer)
else:
v = np.nan
setter(item, v)
# we have an equal len ndarray/convertible to our labels
elif np.array(value).ndim == 2:
# note that this coerces the dtype if we are mixed
# GH 7551
value = np.array(value, dtype=object)
if len(labels) != value.shape[1]:
raise ValueError('Must have equal len keys and value '
'when setting with an ndarray')
for i, item in enumerate(labels):
# setting with a list, recoerces
setter(item, value[:, i].tolist())
# we have an equal len list/ndarray
elif can_do_equal_len():
setter(labels[0], value)
# per label values
else:
if len(labels) != len(value):
raise ValueError('Must have equal len keys and value '
'when setting with an iterable')
for item, v in zip(labels, value):
setter(item, v)
else:
# scalar
for item in labels:
setter(item, value)
else:
if isinstance(indexer, tuple):
indexer = maybe_convert_ix(*indexer)
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
if (len(indexer) > info_axis and
is_integer(indexer[info_axis]) and
all(is_null_slice(idx) for i, idx in enumerate(indexer)
if i != info_axis) and item_labels.is_unique):
self.obj[item_labels[indexer[info_axis]]] = value
return
if isinstance(value, (ABCSeries, dict)):
value = self._align_series(indexer, Series(value))
elif isinstance(value, ABCDataFrame):
value = self._align_frame(indexer, value)
if isinstance(value, ABCPanel):
value = self._align_panel(indexer, value)
# check for chained assignment
self.obj._check_is_chained_assignment_possible()
# actually do the set
self.obj._consolidate_inplace()
self.obj._data = self.obj._data.setitem(indexer=indexer,
value=value)
self.obj._maybe_update_cacher(clear=True)
def _align_series(self, indexer, ser, multiindex_indexer=False):
"""
Parameters
----------
indexer : tuple, slice, scalar
The indexer used to get the locations that will be set to
`ser`
ser : pd.Series
The values to assign to the locations specified by `indexer`
multiindex_indexer : boolean, optional
Defaults to False. Should be set to True if `indexer` was from
a `pd.MultiIndex`, to avoid unnecessary broadcasting.
Returns
-------
`np.array` of `ser` broadcast to the appropriate shape for assignment
to the locations selected by `indexer`
"""
if isinstance(indexer, (slice, np.ndarray, list, Index)):
indexer = tuple([indexer])
if isinstance(indexer, tuple):
# flatten np.ndarray indexers
ravel = lambda i: i.ravel() if isinstance(i, np.ndarray) else i
indexer = tuple(map(ravel, indexer))
aligners = [not is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
obj = self.obj
# are we a single alignable value on a non-primary
# dim (e.g. panel: 1,2, or frame: 0) ?
# hence need to align to a single axis dimension
# rather than find all valid dims
# frame
if is_frame:
single_aligner = single_aligner and aligners[0]
# panel
elif is_panel:
single_aligner = (single_aligner and
(aligners[1] or aligners[2]))
# we have a frame, with multiple indexers on both axes; and a
# series, so need to broadcast (see GH5206)
if (sum_aligners == self.ndim and
all([is_sequence(_) for _ in indexer])):
ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
if len(indexer) > 1 and not multiindex_indexer:
l = len(indexer[1])
ser = np.tile(ser, l).reshape(l, -1).T
return ser
for i, idx in enumerate(indexer):
ax = obj.axes[i]
# multiple aligners (or null slices)
if is_sequence(idx) or isinstance(idx, slice):
if single_aligner and is_null_slice(idx):
continue
new_ix = ax[idx]
if not is_list_like_indexer(new_ix):
new_ix = Index([new_ix])
else:
new_ix = Index(new_ix)
if ser.index.equals(new_ix) or not len(new_ix):
return ser._values.copy()
return ser.reindex(new_ix)._values
# 2 dims
elif single_aligner and is_frame:
# reindex along index
ax = self.obj.axes[1]
if ser.index.equals(ax) or not len(ax):
return ser._values.copy()
return ser.reindex(ax)._values
# >2 dims
elif single_aligner:
broadcast = []
for n, labels in enumerate(self.obj._get_plane_axes(i)):
# reindex along the matching dimensions
if len(labels & ser.index):
ser = ser.reindex(labels)
else:
broadcast.append((n, len(labels)))
# broadcast along other dims
ser = ser._values.copy()
for (axis, l) in broadcast:
shape = [-1] * (len(broadcast) + 1)
shape[axis] = l
ser = np.tile(ser, l).reshape(shape)
if self.obj.ndim == 3:
ser = ser.T
return ser
elif is_scalar(indexer):
ax = self.obj._get_axis(1)
if ser.index.equals(ax):
return ser._values.copy()
return ser.reindex(ax)._values
raise ValueError('Incompatible indexer with Series')
def _align_frame(self, indexer, df):
is_frame = self.obj.ndim == 2
is_panel = self.obj.ndim >= 3
if isinstance(indexer, tuple):
aligners = [not is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
# TODO: single_aligner is not used
single_aligner = sum_aligners == 1 # noqa
idx, cols = None, None
sindexers = []
for i, ix in enumerate(indexer):
ax = self.obj.axes[i]
if is_sequence(ix) or isinstance(ix, slice):
if idx is None:
idx = ax[ix].ravel()
elif cols is None:
cols = ax[ix].ravel()
else:
break
else:
sindexers.append(i)
# panel
if is_panel:
# need to conform to the convention
# as we are not selecting on the items axis
# and we have a single indexer
# GH 7763
if len(sindexers) == 1 and sindexers[0] != 0:
df = df.T
if idx is None:
idx = df.index
if cols is None:
cols = df.columns
if idx is not None and cols is not None:
if df.index.equals(idx) and df.columns.equals(cols):
val = df.copy()._values
else:
val = df.reindex(idx, columns=cols)._values
return val
elif ((isinstance(indexer, slice) or is_list_like_indexer(indexer)) and
is_frame):
ax = self.obj.index[indexer]
if df.index.equals(ax):
val = df.copy()._values
else:
# we have a multi-index and are trying to align
# with a particular, level GH3738
if (isinstance(ax, MultiIndex) and
isinstance(df.index, MultiIndex) and
ax.nlevels != df.index.nlevels):
raise TypeError("cannot align on a multi-index with out "
"specifying the join levels")
val = df.reindex(index=ax)._values
return val
elif is_scalar(indexer) and is_panel:
idx = self.obj.axes[1]
cols = self.obj.axes[2]
# by definition we are indexing on the 0th axis
# a passed in dataframe which is actually a transpose
# of what is needed
if idx.equals(df.index) and cols.equals(df.columns):
return df.copy()._values
return df.reindex(idx, columns=cols)._values
raise ValueError('Incompatible indexer with DataFrame')
def _align_panel(self, indexer, df):
# TODO: is_frame, is_panel are unused
is_frame = self.obj.ndim == 2 # noqa
is_panel = self.obj.ndim >= 3 # noqa
raise NotImplementedError("cannot set using an indexer with a Panel "
"yet!")
def _getitem_tuple(self, tup):
try:
return self._getitem_lowerdim(tup)
except IndexingError:
pass
# no multi-index, so validate all of the indexers
self._has_valid_tuple(tup)
# ugly hack for GH #836
if self._multi_take_opportunity(tup):
return self._multi_take(tup)
# no shortcut needed
retval = self.obj
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if is_null_slice(key):
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=i)
return retval
def _multi_take_opportunity(self, tup):
from pandas.core.generic import NDFrame
# ugly hack for GH #836
if not isinstance(self.obj, NDFrame):
return False
if not all(is_list_like_indexer(x) for x in tup):
return False
# just too complicated
for indexer, ax in zip(tup, self.obj._data.axes):
if isinstance(ax, MultiIndex):
return False
elif is_bool_indexer(indexer):
return False
elif not ax.is_unique:
return False
return True
def _multi_take(self, tup):
""" create the reindex map for our objects, raise the _exception if we
can't create the indexer
"""
try:
o = self.obj
d = dict(
[(a, self._convert_for_reindex(t, axis=o._get_axis_number(a)))
for t, a in zip(tup, o._AXIS_ORDERS)])
return o.reindex(**d)
except(KeyError, IndexingError):
raise self._exception
def _convert_for_reindex(self, key, axis=0):
labels = self.obj._get_axis(axis)
if is_bool_indexer(key):
key = check_bool_indexer(labels, key)
return labels[key]
else:
if isinstance(key, Index):
keyarr = labels._convert_index_indexer(key)
else:
# asarray can be unsafe, NumPy strings are weird
keyarr = _asarray_tuplesafe(key)
if is_integer_dtype(keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
keyarr = labels._convert_arr_indexer(keyarr)
if not labels.is_integer():
keyarr = _ensure_platform_int(keyarr)
return labels.take(keyarr)
return keyarr
def _handle_lowerdim_multi_index_axis0(self, tup):
# we have an axis0 multi-index, handle or raise
try:
# fast path for series or for tup devoid of slices
return self._get_label(tup, axis=0)
except TypeError:
# slices are unhashable
pass
except Exception as e1:
if isinstance(tup[0], (slice, Index)):
raise IndexingError("Handle elsewhere")
# raise the error if we are not sorted
ax0 = self.obj._get_axis(0)
if not ax0.is_lexsorted_for_tuple(tup):
raise e1
return None
def _getitem_lowerdim(self, tup):
# we can directly get the axis result since the axis is specified
if self.axis is not None:
axis = self.obj._get_axis_number(self.axis)
return self._getitem_axis(tup, axis=axis)
# we may have a nested tuples indexer here
if self._is_nested_tuple_indexer(tup):
return self._getitem_nested_tuple(tup)
# we may be using a tuple to represent multiple dimensions here
ax0 = self.obj._get_axis(0)
# ...but iloc should handle the tuple as simple integer-location
# instead of checking it as multiindex representation (GH 13797)
if isinstance(ax0, MultiIndex) and self.name != 'iloc':
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
if len(tup) > self.obj.ndim:
raise IndexingError("Too many indexers. handle elsewhere")
# to avoid wasted computation
# df.ix[d1:d2, 0] -> columns first (True)
# df.ix[0, ['C', 'B', 'A']] -> rows first (False)
for i, key in enumerate(tup):
if is_label_like(key) or isinstance(key, tuple):
section = self._getitem_axis(key, axis=i)
# we have yielded a scalar ?
if not is_list_like_indexer(section):
return section
elif section.ndim == self.ndim:
# we're in the middle of slicing through a MultiIndex
# revise the key wrt to `section` by inserting an _NS
new_key = tup[:i] + (_NS,) + tup[i + 1:]
else:
new_key = tup[:i] + tup[i + 1:]
# unfortunately need an odious kludge here because of
# DataFrame transposing convention
if (isinstance(section, ABCDataFrame) and i > 0 and
len(new_key) == 2):
a, b = new_key
new_key = b, a
if len(new_key) == 1:
new_key, = new_key
# This is an elided recursive call to iloc/loc/etc.
return getattr(section, self.name)[new_key]
raise IndexingError('not applicable')
def _getitem_nested_tuple(self, tup):
# we have a nested tuple so have at least 1 multi-index level
# we should be able to match up the dimensionality here
# we have too many indexers for our dim, but have at least 1
# multi-index dimension, try to see if we have something like
# a tuple passed to a series with a multi-index
if len(tup) > self.ndim:
result = self._handle_lowerdim_multi_index_axis0(tup)
if result is not None:
return result
# this is a series with a multi-index specified as a tuple of
# selectors
return self._getitem_axis(tup, axis=0)
# handle the multi-axis by taking sections and reducing
# this is iterative
obj = self.obj
axis = 0
for i, key in enumerate(tup):
if is_null_slice(key):
axis += 1
continue
current_ndim = obj.ndim
obj = getattr(obj, self.name)._getitem_axis(key, axis=axis)
axis += 1
# if we have a scalar, we are done
if is_scalar(obj) or not hasattr(obj, 'ndim'):
break
# has the dim of the obj changed?
# GH 7199
if obj.ndim < current_ndim:
# GH 7516
# if had a 3 dim and are going to a 2d
# axes are reversed on a DataFrame
if i >= 1 and current_ndim == 3 and obj.ndim == 2:
obj = obj.T
axis -= 1
return obj
def _getitem_axis(self, key, axis=0):
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
labels = self.obj._get_axis(axis)
if isinstance(key, slice):
return self._get_slice_axis(key, axis=axis)
elif (is_list_like_indexer(key) and
not (isinstance(key, tuple) and
isinstance(labels, MultiIndex))):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
else:
# maybe coerce a float scalar to integer
key = labels._maybe_cast_indexer(key)
if is_integer(key):
if axis == 0 and isinstance(labels, MultiIndex):
try:
return self._get_label(key, axis=axis)
except (KeyError, TypeError):
if self.obj.index.levels[0].is_integer():
raise
# this is the fallback! (for a non-float, non-integer index)
if not labels.is_floating() and not labels.is_integer():
return self._get_loc(key, axis=axis)
return self._get_label(key, axis=axis)
def _getitem_iterable(self, key, axis=0):
if self._should_validate_iterable(axis):
self._has_valid_type(key, axis)
labels = self.obj._get_axis(axis)
if is_bool_indexer(key):
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
return self.obj.take(inds, axis=axis, convert=False)
else:
# Have the index compute an indexer or return None
# if it cannot handle; we only act on all found values
indexer, keyarr = labels._convert_listlike_indexer(
key, kind=self.name)
if indexer is not None and (indexer != -1).all():
return self.obj.take(indexer, axis=axis)
# existing labels are unique and the indexer is unique
if labels.is_unique and Index(keyarr).is_unique:
try:
return self.obj.reindex_axis(keyarr, axis=axis)
except AttributeError:
# Series
if axis != 0:
raise AssertionError('axis must be 0')
return self.obj.reindex(keyarr)
# existing labels are non-unique
else:
# reindex with the specified axis
if axis + 1 > self.obj.ndim:
raise AssertionError("invalid indexing error with "
"non-unique index")
new_target, indexer, new_indexer = labels._reindex_non_unique(
keyarr)
if new_indexer is not None:
result = self.obj.take(indexer[indexer != -1], axis=axis,
convert=False)
result = result._reindex_with_indexers(
{axis: [new_target, new_indexer]},
copy=True, allow_dups=True)
else:
result = self.obj.take(indexer, axis=axis, convert=False)
return result
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
labels = self.obj._get_axis(axis)
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
# try to find the correct indexer; if the type is not correct, raise
try:
obj = self._convert_scalar_indexer(obj, axis)
except TypeError:
# but we will allow setting
if is_setter:
pass
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(obj) and not is_int_index
# if we are a label return me
try:
return labels.get_loc(obj)
except LookupError:
if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
if is_setter and len(obj) == labels.nlevels:
return {'key': obj}
raise
except TypeError:
pass
except (ValueError):
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and it's not a valid location
# it's an insert which fails by definition
if is_setter:
# always valid
if self.name == 'loc':
return {'key': obj}
# a positional
if (obj >= self.obj.shape[axis] and
not isinstance(labels, MultiIndex)):
raise ValueError("cannot set by positional indexing with "
"enlargement")
return obj
if is_nested_tuple(obj, labels):
return labels.get_locs(obj)
elif is_list_like_indexer(obj):
if is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
else:
# Have the index compute an indexer or return None
# if it cannot handle
indexer, objarr = labels._convert_listlike_indexer(
obj, kind=self.name)
if indexer is not None:
return indexer
# unique index
if labels.is_unique:
indexer = check = labels.get_indexer(objarr)
# non-unique (dups)
else:
(indexer,
missing) = labels.get_indexer_non_unique(objarr)
# 'indexer' has dupes, create 'check' using 'missing'
check = np.zeros_like(objarr)
check[missing] = -1
mask = check == -1
if mask.any():
raise KeyError('%s not in index' % objarr[mask])
return _values_from_object(indexer)
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(obj) and is_setter:
return {'key': obj}
raise
def _tuplify(self, loc):
tup = [slice(None, None) for _ in range(self.ndim)]
tup[0] = loc
return tuple(tup)
def _get_slice_axis(self, slice_obj, axis=0):
obj = self.obj
if not need_slice(slice_obj):
return obj
indexer = self._convert_slice_indexer(slice_obj, axis)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj.take(indexer, axis=axis, convert=False)
class _IXIndexer(_NDFrameIndexer):
"""A primarily label-location based indexer, with integer position
fallback.
``.ix[]`` supports mixed integer and label based access. It is
primarily label based, but will fall back to integer positional
access unless the corresponding axis is of integer type.
``.ix`` is the most general indexer and will support any of the
inputs in ``.loc`` and ``.iloc``. ``.ix`` also supports floating
point label schemes. ``.ix`` is exceptionally useful when dealing
with mixed positional and label based hierarchical indexes.
However, when an axis is integer based, ONLY label based access
and not positional access is supported. Thus, in such cases, it's
usually better to be explicit and use ``.iloc`` or ``.loc``.
See more at :ref:`Advanced Indexing <advanced>`.
"""
def __init__(self, obj, name):
_ix_deprecation_warning = """
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate_ix"""
warnings.warn(_ix_deprecation_warning,
DeprecationWarning, stacklevel=3)
super(_IXIndexer, self).__init__(obj, name)
def _has_valid_type(self, key, axis):
if isinstance(key, slice):
return True
elif is_bool_indexer(key):
return True
elif is_list_like_indexer(key):
return True
else:
self._convert_scalar_indexer(key, axis)
return True
class _LocationIndexer(_NDFrameIndexer):
_exception = Exception
def __getitem__(self, key):
if type(key) is tuple:
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
try:
if self._is_scalar_access(key):
return self._getitem_scalar(key)
except (KeyError, IndexError):
pass
return self._getitem_tuple(key)
else:
key = com._apply_if_callable(key, self.obj)
return self._getitem_axis(key, axis=0)
def _is_scalar_access(self, key):
raise NotImplementedError()
def _getitem_scalar(self, key):
raise NotImplementedError()
def _getitem_axis(self, key, axis=0):
raise NotImplementedError()
def _getbool_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = check_bool_indexer(labels, key)
inds, = key.nonzero()
try:
return self.obj.take(inds, axis=axis, convert=False)
except Exception as detail:
raise self._exception(detail)
def _get_slice_axis(self, slice_obj, axis=0):
""" this is pretty simple as we just have to deal with labels """
obj = self.obj
if not need_slice(slice_obj):
return obj
labels = obj._get_axis(axis)
indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop,
slice_obj.step, kind=self.name)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj.take(indexer, axis=axis, convert=False)
class _LocIndexer(_LocationIndexer):
"""Purely label-location based indexer for selection by label.
``.loc[]`` is primarily label based, but may also be used with a
boolean array.
Allowed inputs are:
- A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is
interpreted as a *label* of the index, and **never** as an
integer position along the index).
- A list or array of labels, e.g. ``['a', 'b', 'c']``.
- A slice object with labels, e.g. ``'a':'f'`` (note that contrary
to usual python slices, **both** the start and the stop are included!).
- A boolean array.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above)
``.loc`` will raise a ``KeyError`` when the items are not found.
See more at :ref:`Selection by Label <indexing.label>`
"""
_valid_types = ("labels (MUST BE IN THE INDEX), slices of labels (BOTH "
"endpoints included! Can be slices of integers if the "
"index is integers), listlike of labels, boolean")
_exception = KeyError
def _has_valid_type(self, key, axis):
ax = self.obj._get_axis(axis)
# valid for a label where all labels are in the index
# slice of labels (where start-end in labels)
# slice of integers (only if in the labels)
# boolean
if isinstance(key, slice):
return True
elif is_bool_indexer(key):
return True
elif is_list_like_indexer(key):
# mi is just a passthru
if isinstance(key, tuple) and isinstance(ax, MultiIndex):
return True
# TODO: don't check the entire key unless necessary
if (not is_iterator(key) and len(key) and
np.all(ax.get_indexer_for(key) < 0)):
raise KeyError("None of [%s] are in the [%s]" %
(key, self.obj._get_axis_name(axis)))
return True
else:
def error():
if isnull(key):
raise TypeError("cannot use label indexing with a null "
"key")
raise KeyError("the label [%s] is not in the [%s]" %
(key, self.obj._get_axis_name(axis)))
try:
key = self._convert_scalar_indexer(key, axis)
if not ax.contains(key):
error()
except TypeError as e:
# python 3 type errors should be raised
if _is_unorderable_exception(e):
error()
raise
except:
error()
return True
def _is_scalar_access(self, key):
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if not hasattr(key, '__len__'):
return False
if len(key) != self.ndim:
return False
for i, k in enumerate(key):
if not is_scalar(k):
return False
ax = self.obj.axes[i]
if isinstance(ax, MultiIndex):
return False
if not ax.is_unique:
return False
return True
def _getitem_scalar(self, key):
# a fast-path to scalar access
# if not, raise
values = self.obj.get_value(*key)
return values
def _get_partial_string_timestamp_match_key(self, key, labels):
"""Translate any partial string timestamp matches in key, returning the
new key (GH 10331)"""
if isinstance(labels, MultiIndex):
if isinstance(key, compat.string_types) and \
labels.levels[0].is_all_dates:
# Convert key '2016-01-01' to
# ('2016-01-01'[, slice(None, None, None)]+)
key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))
if isinstance(key, tuple):
# Convert (..., '2016-01-01', ...) in tuple to
# (..., slice('2016-01-01', '2016-01-01', None), ...)
new_key = []
for i, component in enumerate(key):
if isinstance(component, compat.string_types) and \
labels.levels[i].is_all_dates:
new_key.append(slice(component, component, None))
else:
new_key.append(component)
key = tuple(new_key)
return key
def _getitem_axis(self, key, axis=0):
labels = self.obj._get_axis(axis)
key = self._get_partial_string_timestamp_match_key(key, labels)
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
elif is_bool_indexer(key):
return self._getbool_axis(key, axis=axis)
elif is_list_like_indexer(key):
# convert various list-like indexers
# to a list of keys
# we will use the *values* of the object
# and NOT the index if it's a PandasObject
if isinstance(labels, MultiIndex):
if isinstance(key, (ABCSeries, np.ndarray)) and key.ndim <= 1:
# Series, or 0,1 ndim ndarray
# GH 14730
key = list(key)
elif isinstance(key, ABCDataFrame):
# GH 15438
raise NotImplementedError("Indexing a MultiIndex with a "
"DataFrame key is not "
"implemented")
elif hasattr(key, 'ndim') and key.ndim > 1:
raise NotImplementedError("Indexing a MultiIndex with a "
"multidimensional key is not "
"implemented")
if (not isinstance(key, tuple) and len(key) > 1 and
not isinstance(key[0], tuple)):
key = tuple([key])
# an iterable multi-selection
if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)):
if hasattr(key, 'ndim') and key.ndim > 1:
raise ValueError('Cannot index with multidimensional key')
return self._getitem_iterable(key, axis=axis)
# nested tuple slicing
if is_nested_tuple(key, labels):
locs = labels.get_locs(key)
indexer = [slice(None)] * self.ndim
indexer[axis] = locs
return self.obj.iloc[tuple(indexer)]
# fall thru to straight lookup
self._has_valid_type(key, axis)
return self._get_label(key, axis=axis)
class _iLocIndexer(_LocationIndexer):
"""Purely integer-location based indexing for selection by position.
``.iloc[]`` is primarily integer position based (from ``0`` to
``length-1`` of the axis), but may also be used with a boolean
array.
Allowed inputs are:
- An integer, e.g. ``5``.
- A list or array of integers, e.g. ``[4, 3, 0]``.
- A slice object with ints, e.g. ``1:7``.
- A boolean array.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above)
``.iloc`` will raise ``IndexError`` if a requested indexer is
out-of-bounds, except *slice* indexers which allow out-of-bounds
indexing (this conforms with python/numpy *slice* semantics).
See more at :ref:`Selection by Position <indexing.integer>`
"""
_valid_types = ("integer, integer slice (START point is INCLUDED, END "
"point is EXCLUDED), listlike of integers, boolean array")
_exception = IndexError
def _has_valid_type(self, key, axis):
if is_bool_indexer(key):
if hasattr(key, 'index') and isinstance(key.index, Index):
if key.index.inferred_type == 'integer':
raise NotImplementedError("iLocation based boolean "
"indexing on an integer type "
"is not available")
raise ValueError("iLocation based boolean indexing cannot use "
"an indexable as a mask")
return True
if isinstance(key, slice):
return True
elif is_integer(key):
return self._is_valid_integer(key, axis)
elif is_list_like_indexer(key):
return self._is_valid_list_like(key, axis)
return False
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _is_scalar_access(self, key):
# this is a shortcut accessor to both .loc and .iloc
# that provide the equivalent access of .at and .iat
# a) avoid getting things via sections and (to minimize dtype changes)
# b) provide a performant path
if not hasattr(key, '__len__'):
return False
if len(key) != self.ndim:
return False
for i, k in enumerate(key):
if not is_integer(k):
return False
ax = self.obj.axes[i]
if not ax.is_unique:
return False
return True
def _getitem_scalar(self, key):
# a fast-path to scalar access
# if not, raise
values = self.obj.get_value(*key, takeable=True)
return values
def _is_valid_integer(self, key, axis):
# return a boolean if we have a valid integer indexer
ax = self.obj._get_axis(axis)
l = len(ax)
if key >= l or key < -l:
raise IndexError("single positional indexer is out-of-bounds")
return True
def _is_valid_list_like(self, key, axis):
# return a boolean if we are a valid list-like (e.g. that we don't
# have out-of-bounds values)
# a tuple should already have been caught by this point
# so don't treat a tuple as a valid indexer
if isinstance(key, tuple):
raise IndexingError('Too many indexers')
# coerce the key to not exceed the maximum size of the index
arr = np.array(key)
ax = self.obj._get_axis(axis)
l = len(ax)
if (hasattr(arr, '__len__') and len(arr) and
(arr.max() >= l or arr.min() < -l)):
raise IndexError("positional indexers are out-of-bounds")
return True
def _getitem_tuple(self, tup):
self._has_valid_tuple(tup)
try:
return self._getitem_lowerdim(tup)
except:
pass
retval = self.obj
axis = 0
for i, key in enumerate(tup):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
if is_null_slice(key):
axis += 1
continue
retval = getattr(retval, self.name)._getitem_axis(key, axis=axis)
# if the dim was reduced, then pass a lower-dim the next time
if retval.ndim < self.ndim:
axis -= 1
# try to get for the next axis
axis += 1
return retval
def _get_slice_axis(self, slice_obj, axis=0):
obj = self.obj
if not need_slice(slice_obj):
return obj
slice_obj = self._convert_slice_indexer(slice_obj, axis)
if isinstance(slice_obj, slice):
return self._slice(slice_obj, axis=axis, kind='iloc')
else:
return self.obj.take(slice_obj, axis=axis, convert=False)
def _get_list_axis(self, key, axis=0):
"""
Return Series values by list or array of integers
Parameters
----------
key : list-like positional indexer
axis : int (can only be zero)
Returns
-------
Series object
"""
try:
return self.obj.take(key, axis=axis, convert=False)
except IndexError:
# re-raise with different error message
raise IndexError("positional indexers are out-of-bounds")
def _getitem_axis(self, key, axis=0):
if isinstance(key, slice):
self._has_valid_type(key, axis)
return self._get_slice_axis(key, axis=axis)
if isinstance(key, list):
try:
key = np.asarray(key)
except TypeError: # pragma: no cover
pass
if is_bool_indexer(key):
self._has_valid_type(key, axis)
return self._getbool_axis(key, axis=axis)
# a list of integers
elif is_list_like_indexer(key):
return self._get_list_axis(key, axis=axis)
# a single integer
else:
key = self._convert_scalar_indexer(key, axis)
if not is_integer(key):
raise TypeError("Cannot index by location index with a "
"non-integer key")
# validate the location
self._is_valid_integer(key, axis)
return self._get_loc(key, axis=axis)
def _convert_to_indexer(self, obj, axis=0, is_setter=False):
""" much simpler as we only have to deal with our valid types """
# we may need to convert a float key
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
elif is_float(obj):
return self._convert_scalar_indexer(obj, axis)
elif self._has_valid_type(obj, axis):
return obj
raise ValueError("Can only index by location with a [%s]" %
self._valid_types)
class _ScalarAccessIndexer(_NDFrameIndexer):
""" access scalars quickly """
def _convert_key(self, key, is_setter=False):
return list(key)
def __getitem__(self, key):
if not isinstance(key, tuple):
# we could have a convertible item here (e.g. Timestamp)
if not is_list_like_indexer(key):
key = tuple([key])
else:
raise ValueError('Invalid call for scalar access (getting)!')
key = self._convert_key(key)
return self.obj.get_value(*key, takeable=self._takeable)
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = tuple(com._apply_if_callable(x, self.obj) for x in key)
else:
# scalar callable may return tuple
key = com._apply_if_callable(key, self.obj)
if not isinstance(key, tuple):
key = self._tuplify(key)
if len(key) != self.obj.ndim:
raise ValueError('Not enough indexers for scalar access '
'(setting)!')
key = list(self._convert_key(key, is_setter=True))
key.append(value)
self.obj.set_value(*key, takeable=self._takeable)
class _AtIndexer(_ScalarAccessIndexer):
"""Fast label-based scalar accessor
Similarly to ``loc``, ``at`` provides **label** based scalar lookups.
You can also set using these indexers.
"""
_takeable = False
def _convert_key(self, key, is_setter=False):
""" require they keys to be the same type as the index (so we don't
fallback)
"""
# allow arbitrary setting
if is_setter:
return list(key)
for ax, i in zip(self.obj.axes, key):
if ax.is_integer():
if not is_integer(i):
raise ValueError("At based indexing on an integer index "
"can only have integer indexers")
else:
if is_integer(i):
raise ValueError("At based indexing on an non-integer "
"index can only have non-integer "
"indexers")
return key
class _iAtIndexer(_ScalarAccessIndexer):
"""Fast integer location scalar accessor.
Similarly to ``iloc``, ``iat`` provides **integer** based lookups.
You can also set using these indexers.
"""
_takeable = True
def _has_valid_setitem_indexer(self, indexer):
self._has_valid_positional_setitem_indexer(indexer)
def _convert_key(self, key, is_setter=False):
""" require integer args (and convert to label arguments) """
for a, i in zip(self.obj.axes, key):
if not is_integer(i):
raise ValueError("iAt based indexing can only have integer "
"indexers")
return key
# 32-bit floating point machine epsilon
_eps = 1.1920929e-07
def length_of_indexer(indexer, target=None):
"""return the length of a single non-tuple indexer which could be a slice
"""
if target is not None and isinstance(indexer, slice):
l = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += l
if stop is None or stop > l:
stop = l
elif stop < 0:
stop += l
if step is None:
step = 1
elif step < 0:
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, Index, np.ndarray, list)):
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
def convert_to_index_sliceable(obj, key):
"""if we are index sliceable, then return my slicer, otherwise return None
"""
idx = obj.index
if isinstance(key, slice):
return idx._convert_slice_indexer(key, kind='getitem')
elif isinstance(key, compat.string_types):
# we are an actual column
if obj._data.items.contains(key):
return None
# We might have a datetimelike string that we can translate to a
# slice here via partial string indexing
if idx.is_all_dates:
try:
return idx._get_string_slice(key)
except (KeyError, ValueError, NotImplementedError):
return None
return None
def is_index_slice(obj):
def _is_valid_index(x):
return (is_integer(x) or is_float(x) and
np.allclose(x, int(x), rtol=_eps, atol=0))
def _crit(v):
return v is None or _is_valid_index(v)
both_none = obj.start is None and obj.stop is None
return not both_none and (_crit(obj.start) and _crit(obj.stop))
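# Illustrative example (not part of pandas): is_index_slice(slice(0, 5)) would
# return True because both bounds are integer-like, while a fully open slice
# such as slice(None, None) returns False since both bounds are missing.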
def check_bool_indexer(ax, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
# this function assumes that is_bool_indexer(key) == True
result = key
if isinstance(key, ABCSeries) and not key.index.equals(ax):
result = result.reindex(ax)
mask = isnull(result._values)
if mask.any():
raise IndexingError('Unalignable boolean Series provided as '
'indexer (index of the boolean Series and of '
'the indexed object do not match')
result = result.astype(bool)._values
elif is_sparse(result):
result = result.to_dense()
result = np.asarray(result, dtype=bool)
else:
# is_bool_indexer has already checked for nulls in the case of an
# object array key, so no check needed here
result = np.asarray(result, dtype=bool)
return result
def convert_missing_indexer(indexer):
""" reverse convert a missing indexer, which is a dict
return the scalar indexer and a boolean indicating if we converted
"""
if isinstance(indexer, dict):
# a missing key (but not a tuple indexer)
indexer = indexer['key']
if isinstance(indexer, bool):
raise KeyError("cannot use a single bool to index into setitem")
return indexer, True
return indexer, False
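# Illustrative example (not part of pandas): convert_missing_indexer({'key': 'foo'})
# would return ('foo', True), whereas a plain indexer such as 5 comes back
# unchanged as (5, False).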
def convert_from_missing_indexer_tuple(indexer, axes):
""" create a filtered indexer that doesn't have any missing indexers """
def get_indexer(_i, _idx):
return (axes[_i].get_loc(_idx['key']) if isinstance(_idx, dict) else
_idx)
return tuple([get_indexer(_i, _idx) for _i, _idx in enumerate(indexer)])
def maybe_convert_indices(indices, n):
""" if we have negative indicies, translate to postive here
if have indicies that are out-of-bounds, raise an IndexError
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If list is empty, np.array will return float and cause indexing
# errors.
return np.empty(0, dtype=np.int_)
mask = indices < 0
if mask.any():
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
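# Illustrative example (not part of pandas): maybe_convert_indices([-1, 0], 3)
# would translate the negative position and return array([2, 0]); an index of 3
# or -4 against n=3 raises IndexError("indices are out-of-bounds").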
def maybe_convert_ix(*args):
"""
We likely want to take the cross-product
"""
ixify = True
for arg in args:
if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)):
ixify = False
if ixify:
return np.ix_(*args)
else:
return args
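# Illustrative example (not part of pandas): with array-like arguments, e.g.
# maybe_convert_ix([0, 1], [2, 3]), the np.ix_ call returns open-mesh index
# arrays selecting the cross-product, i.e. the 2x2 block of rows 0-1 and
# columns 2-3.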
def is_nested_tuple(tup, labels):
# check for a compatible nested tuple and multiindexes among the axes
if not isinstance(tup, tuple):
return False
# are we nested tuple of: tuple,list,slice
for i, k in enumerate(tup):
if isinstance(k, (tuple, list, slice)):
return isinstance(labels, MultiIndex)
return False
def is_list_like_indexer(key):
# allow a list_like, but exclude NamedTuples which can be indexers
return is_list_like(key) and not (isinstance(key, tuple) and
type(key) is not tuple)
def is_label_like(key):
# select a label or row
return not isinstance(key, slice) and not is_list_like_indexer(key)
def need_slice(obj):
return (obj.start is not None or obj.stop is not None or
(obj.step is not None and obj.step != 1))
def maybe_droplevels(index, key):
# drop levels
original_index = index
if isinstance(key, tuple):
for _ in key:
try:
index = index.droplevel(0)
except:
# we have dropped too much, so back out
return original_index
else:
try:
index = index.droplevel(0)
except:
pass
return index
def _non_reducing_slice(slice_):
"""
Ensure that a slice doesn't reduce to a Series or Scalar.
Any user-passed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
# ['A', 'B'] -> IndexSlices[:, ['A', 'B']]
kinds = tuple(list(compat.string_types) + [ABCSeries, np.ndarray, Index,
list])
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]
def pred(part):
# true when slice does *not* reduce
return isinstance(part, slice) or is_list_like(part)
if not is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
slice_ = [part if pred(part) else [part] for part in slice_]
return tuple(slice_)
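# Illustrative example (not part of pandas): a bare column label such as
# _non_reducing_slice('A') is first expanded to IndexSlice[:, 'A'] and then
# wrapped, yielding (slice(None, None, None), ['A']) so that .loc keeps a
# DataFrame instead of reducing to a Series.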
def _maybe_numeric_slice(df, slice_, include_bool=False):
"""
Want nice defaults for background_gradient that don't break
with non-numeric data. But if slice_ is passed, go with that.
"""
if slice_ is None:
dtypes = [np.number]
if include_bool:
dtypes.append(bool)
slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
return slice_
| mit |
leighpauls/k2cro4 | native_client/buildbot/buildbot_standard.py | 1 | 14108 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Enable 'with' statements in Python 2.5
from __future__ import with_statement
import os.path
import re
import subprocess
import sys
from buildbot_lib import (
BuildContext, BuildStatus, Command, EnsureDirectoryExists,
ParseStandardCommandLine, RemoveDirectory, RunBuild, SCons, Step, StepLink,
StepText, TryToCleanContents)
# Windows-specific environment manipulation
def SetupWindowsEnvironment(context):
# Blow away path for now if on the bots (to be more hermetic).
if os.environ.get('BUILDBOT_SLAVENAME'):
paths = [
r'c:\b\depot_tools',
r'c:\b\depot_tools\python_bin',
r'c:\b\build_internal\tools',
r'e:\b\depot_tools',
r'e:\b\depot_tools\python_bin',
r'e:\b\build_internal\tools',
r'C:\WINDOWS\system32',
r'C:\WINDOWS\system32\WBEM',
]
context.SetEnv('PATH', os.pathsep.join(paths))
# Poke around looking for MSVC. We should do something more principled in
# the future.
# The name of Program Files can differ, depending on the bittage of Windows.
program_files = r'c:\Program Files (x86)'
if not os.path.exists(program_files):
program_files = r'c:\Program Files'
if not os.path.exists(program_files):
raise Exception('Cannot find the Program Files directory!')
# The location of MSVC can differ depending on the version.
msvc_locs = [
('Microsoft Visual Studio 10.0', 'VS100COMNTOOLS', '2010'),
('Microsoft Visual Studio 9.0', 'VS90COMNTOOLS', '2008'),
('Microsoft Visual Studio 8.0', 'VS80COMNTOOLS', '2005'),
]
for dirname, comntools_var, gyp_msvs_version in msvc_locs:
msvc = os.path.join(program_files, dirname)
context.SetEnv('GYP_MSVS_VERSION', gyp_msvs_version)
if os.path.exists(msvc):
break
else:
# The break statement did not execute.
raise Exception('Cannot find MSVC!')
# Put MSVC in the path.
vc = os.path.join(msvc, 'VC')
comntools = os.path.join(msvc, 'Common7', 'Tools')
perf = os.path.join(msvc, 'Team Tools', 'Performance Tools')
context.SetEnv('PATH', os.pathsep.join([
context.GetEnv('PATH'),
vc,
comntools,
perf]))
# SCons needs this variable to find vsvars.bat.
# The end slash is needed because the batch files expect it.
context.SetEnv(comntools_var, comntools + '\\')
# This environment variable will cause SCons to print debug info while it searches
# for MSVC.
context.SetEnv('SCONS_MSCOMMON_DEBUG', '-')
# Needed for finding devenv.
context['msvc'] = msvc
# The context on other systems has GYP_DEFINES set, set it for windows to be
# able to save and restore without KeyError.
context.SetEnv('GYP_DEFINES', '')
def SetupGypDefines(context, extra_vars=[]):
context.SetEnv('GYP_DEFINES', ' '.join(context['gyp_vars'] + extra_vars))
def SetupLinuxEnvironment(context):
SetupGypDefines(context, ['target_arch='+context['gyp_arch']])
def SetupMacEnvironment(context):
SetupGypDefines(context)
context.SetEnv('GYP_GENERATORS', 'ninja')
def SetupContextVars(context):
# The branch is set to native_client on the main bots; on the trybots it's
# set to ''. Otherwise, we should assume a particular branch is being used.
context['branch'] = os.environ.get('BUILDBOT_BRANCH', 'native_client')
context['off_trunk'] = context['branch'] not in ['native_client', '']
def ValidatorTest(context, architecture, validator, warn_only=False):
cmd=[
sys.executable,
'tests/abi_corpus/validator_regression_test.py',
'--keep-going',
'--validator', validator,
'--arch', architecture
]
if warn_only:
cmd.append('--warn-only')
Command(context, cmd=cmd)
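# Illustrative invocation (the validator path below is hypothetical): calling
# ValidatorTest(context, 'x86-32', 'scons-out/opt-linux-x86-32/staging/ncval')
# would run: python tests/abi_corpus/validator_regression_test.py --keep-going
# --validator <that path> --arch x86-32, adding --warn-only when warn_only=True.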
def CommandGypBuild(context):
if context.Windows():
Command(
context,
cmd=[os.path.join(context['msvc'], 'Common7', 'IDE', 'devenv.com'),
r'build\all.sln',
'/build', context['gyp_mode']])
elif context.Linux():
Command(context, cmd=['make', '-C', '..', '-k',
'-j%d' % context['max_jobs'], 'V=1',
'BUILDTYPE=' + context['gyp_mode']])
elif context.Mac():
Command(context, cmd=[
'ninja', '-k', '0', '-C', '../out/' + context['gyp_mode']])
else:
raise Exception('Unknown platform')
def CommandGypGenerate(context):
Command(
context,
cmd=[
sys.executable,
'native_client/build/gyp_nacl',
'native_client/build/all.gyp',
],
cwd='..')
def CommandGclientRunhooks(context):
if context.Windows():
gclient = 'gclient.bat'
else:
gclient = 'gclient'
print 'Running gclient runhooks...'
print 'GYP_DEFINES=' + context.GetEnv('GYP_DEFINES')
Command(context, cmd=[gclient, 'runhooks', '--force'])
def RemoveGypBuildDirectories():
# Remove all directories on all platforms. Overkill, but it allows for
# straight-line code.
# Windows
RemoveDirectory('build/Debug')
RemoveDirectory('build/Release')
RemoveDirectory('build/Debug-Win32')
RemoveDirectory('build/Release-Win32')
RemoveDirectory('build/Debug-x64')
RemoveDirectory('build/Release-x64')
# Linux and Mac
RemoveDirectory('hg')
RemoveDirectory('../xcodebuild')
RemoveDirectory('../sconsbuild')
RemoveDirectory('../out')
RemoveDirectory('src/third_party/nacl_sdk/arm-newlib')
def BuildScript(status, context):
inside_toolchain = context['inside_toolchain']
# Clean out build directories.
with Step('clobber', status):
RemoveDirectory(r'scons-out')
RemoveGypBuildDirectories()
with Step('cleanup_temp', status):
# Picking out drive letter on which the build is happening so we can use
# it for the temp directory.
if context.Windows():
build_drive = os.path.splitdrive(os.path.abspath(__file__))[0]
tmp_dir = os.path.join(build_drive, os.path.sep + 'temp')
context.SetEnv('TEMP', tmp_dir)
context.SetEnv('TMP', tmp_dir)
else:
tmp_dir = '/tmp'
print 'Making sure %s exists...' % tmp_dir
EnsureDirectoryExists(tmp_dir)
print 'Cleaning up the contents of %s...' % tmp_dir
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
TryToCleanContents(tmp_dir, file_name_filter)
# Mac has an additional temporary directory; clean it up.
# TODO(bradnelson): Fix Mac Chromium so that these temp files are created
# with open() + unlink() so that they will not get left behind.
if context.Mac():
subprocess.call(
"find /var/folders -name '.org.chromium.*' -exec rm -rfv '{}' ';'",
shell=True)
subprocess.call(
"find /var/folders -name '.com.google.Chrome*' -exec rm -rfv '{}' ';'",
shell=True)
# Skip over hooks when run inside the toolchain build because
# download_toolchains would overwrite the toolchain build.
if inside_toolchain:
with Step('gyp_generate_only', status):
CommandGypGenerate(context)
else:
with Step('gclient_runhooks', status):
CommandGclientRunhooks(context)
if context['clang']:
with Step('update_clang', status):
Command(context, cmd=['../tools/clang/scripts/update.sh'])
  # For --validator mode, just build the 32- and 64-bit validators and test them.
if context['validator']:
with Step('build ncval-x86-32', status):
SCons(context, platform='x86-32', parallel=True, args=['ncval'])
with Step('build ncval-x86-64', status):
SCons(context, platform='x86-64', parallel=True, args=['ncval'])
with Step('clobber dfa_validator', status):
Command(context, cmd=['rm', '-rf', 'dfa_validator'])
with Step('clone dfa_validator', status):
Command(context, cmd=[
'git', 'clone',
'git://github.com/mseaborn/x86-decoder.git', 'dfa_validator32'])
Command(context, cmd=[
'git', 'checkout', '1a5963fa48739c586d5bbd3d46d0a8a7f25112f2'],
cwd='dfa_validator32')
Command(context, cmd=[
'git', 'clone',
'git://github.com/mseaborn/x86-decoder.git', 'dfa_validator64'])
Command(context, cmd=[
'git', 'checkout', '6ffa36f44cafd2cdad37e1e27254c498030ff712'],
cwd='dfa_validator64')
with Step('build dfa_validator_32', status):
Command(context, cmd=['make'], cwd='dfa_validator32')
with Step('build dfa_validator_64', status):
Command(context, cmd=['make'], cwd='dfa_validator64')
with Step('build ragel_validator-32', status):
SCons(context, platform='x86-32', parallel=True, args=['ncval_new'])
with Step('build ragel_validator-64', status):
SCons(context, platform='x86-64', parallel=True, args=['ncval_new'])
with Step('predownload validator corpus', status):
Command(context,
cmd=[sys.executable,
'tests/abi_corpus/validator_regression_test.py',
'--download-only'])
with Step('validator_regression_test current x86-32', status,
halt_on_fail=False):
ValidatorTest(
context, 'x86-32', 'scons-out/opt-linux-x86-32/staging/ncval')
with Step('validator_regression_test current x86-64', status,
halt_on_fail=False):
ValidatorTest(
context, 'x86-64', 'scons-out/opt-linux-x86-64/staging/ncval')
with Step('validator_regression_test dfa x86-32', status,
halt_on_fail=False):
ValidatorTest(
context, 'x86-32', 'dfa_validator32/dfa_ncval', warn_only=True)
with Step('validator_regression_test dfa x86-64', status,
halt_on_fail=False):
ValidatorTest(
context, 'x86-64', 'dfa_validator64/dfa_ncval', warn_only=True)
with Step('validator_regression_test ragel x86-32', status,
halt_on_fail=False):
ValidatorTest(
context, 'x86-32',
'scons-out/opt-linux-x86-32/staging/ncval_new')
with Step('validator_regression_test ragel x86-64', status,
halt_on_fail=False):
ValidatorTest(
context, 'x86-64',
'scons-out/opt-linux-x86-64/staging/ncval_new')
with Step('validator_diff_tests', status, halt_on_fail=False):
SCons(context, args=['validator_diff_tests'])
return
# Run checkdeps script to vet #includes.
with Step('checkdeps', status):
Command(context, cmd=[sys.executable, 'tools/checkdeps/checkdeps.py'])
# Make sure our Gyp build is working.
if not context['no_gyp']:
with Step('gyp_compile', status):
CommandGypBuild(context)
# The main compile step.
with Step('scons_compile', status):
SCons(context, parallel=True, args=[])
### BEGIN tests ###
with Step('small_tests', status, halt_on_fail=False):
SCons(context, args=['small_tests'])
with Step('medium_tests', status, halt_on_fail=False):
SCons(context, args=['medium_tests'])
with Step('large_tests', status, halt_on_fail=False):
SCons(context, args=['large_tests'])
with Step('compile IRT tests', status):
SCons(context, parallel=True, mode=['nacl_irt_test'])
with Step('small_tests under IRT', status, halt_on_fail=False):
SCons(context, mode=context['default_scons_mode'] + ['nacl_irt_test'],
args=['small_tests_irt'])
with Step('medium_tests under IRT', status, halt_on_fail=False):
SCons(context, mode=context['default_scons_mode'] + ['nacl_irt_test'],
args=['medium_tests_irt'])
# TODO(eugenis): reenable this on clang/opt once the LLVM issue is fixed
# http://code.google.com/p/nativeclient/issues/detail?id=2473
bug2473 = (context['clang'] or context['asan']) and context['mode'] == 'opt'
if context.Mac() and not bug2473:
# x86-64 is not fully supported on Mac. Not everything works, but we
# want to stop x86-64 sel_ldr from regressing, so do a minimal test here.
with Step('minimal x86-64 test', status, halt_on_fail=False):
SCons(context, parallel=True, platform='x86-64',
args=['run_hello_world_test'])
### END tests ###
if not context['no_gyp']:
# Build with ragel-based validator using GYP.
gyp_defines_save = context.GetEnv('GYP_DEFINES')
context.SetEnv('GYP_DEFINES',
' '.join([gyp_defines_save, 'nacl_validator_ragel=1']))
with Step('gyp_compile_ragel', status):
      # Clobber the GYP build to recompile the necessary files with the new
      # preprocessor macro definitions. This is needed because some build
      # systems (such as GNU Make, MSBuild, etc.) do not treat compiler
      # arguments as a dependency.
RemoveGypBuildDirectories()
CommandGypGenerate(context)
CommandGypBuild(context)
context.SetEnv('GYP_DEFINES', gyp_defines_save)
# Build with ragel-based validator using scons.
with Step('scons_compile_ragel', status):
SCons(context, parallel=True, args=['validator_ragel=1'])
# Smoke tests for the R-DFA validator.
with Step('validator_ragel_tests', status):
args = ['validator_ragel=1',
'small_tests',
'medium_tests',
'large_tests',
]
# Add nacl_irt_test mode to be able to run run_hello_world_test_irt that
# tests validation of the IRT.
SCons(context,
mode=context['default_scons_mode'] + ['nacl_irt_test'],
args=args)
def Main():
# TODO(ncbray) make buildbot scripts composable to support toolchain use case.
context = BuildContext()
status = BuildStatus(context)
ParseStandardCommandLine(context)
SetupContextVars(context)
if context.Windows():
SetupWindowsEnvironment(context)
elif context.Linux():
SetupLinuxEnvironment(context)
elif context.Mac():
SetupMacEnvironment(context)
else:
raise Exception("Unsupported platform.")
RunBuild(BuildScript, status)
if __name__ == '__main__':
Main()
| bsd-3-clause |
mdeger/nest-simulator | topology/pynest/tests/test_dumping.py | 9 | 3718 | # -*- coding: utf-8 -*-
#
# test_dumping.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for topology hl_api dumping functions.
NOTE: These tests only check whether the code runs; they do not check
whether the results produced are correct.
"""
import unittest
import nest
import nest.topology as topo
import sys
import os
import os.path
class PlottingTestCase(unittest.TestCase):
def nest_tmpdir(self):
"""Returns temp dir path from environment, current dir otherwise."""
if 'NEST_DATA_PATH' in os.environ:
return os.environ['NEST_DATA_PATH']
else:
return '.'
def test_DumpNodes(self):
"""Test dumping nodes."""
ldict = {'elements': 'iaf_psc_alpha', 'rows': 3, 'columns': 3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.DumpLayerNodes(l, os.path.join(self.nest_tmpdir(),
'test_DumpNodes.out.lyr'))
self.assertTrue(True)
def test_DumpNodes2(self):
"""Test dumping nodes, two layers."""
ldict = {'elements': 'iaf_psc_alpha', 'rows': 3, 'columns': 3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.DumpLayerNodes(l * 2, os.path.join(self.nest_tmpdir(),
'test_DumpNodes2.out.lyr'))
self.assertTrue(True)
def test_DumpConns(self):
"""Test dumping connections."""
ldict = {'elements': 'iaf_psc_alpha', 'rows': 3, 'columns': 3,
'extent': [2., 2.], 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 1.}}}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.ConnectLayers(l, l, cdict)
topo.DumpLayerConnections(l, 'static_synapse',
os.path.join(self.nest_tmpdir(),
'test_DumpConns.out.cnn'))
self.assertTrue(True)
def test_DumpConns2(self):
"""Test dumping connections, 2 layers."""
ldict = {'elements': 'iaf_psc_alpha', 'rows': 3, 'columns': 3,
'extent': [2., 2.], 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 1.}}}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.ConnectLayers(l, l, cdict)
topo.DumpLayerConnections(l * 2, 'static_synapse',
os.path.join(self.nest_tmpdir(),
'test_DumpConns2.out.cnn'))
self.assertTrue(True)
def suite():
suite = unittest.makeSuite(PlottingTestCase, 'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
try:
import matplotlib.pyplot as plt
plt.show()
except ImportError:
pass
| gpl-2.0 |
MJuddBooth/pandas | pandas/tests/io/parser/test_converters.py | 2 | 4164 | # -*- coding: utf-8 -*-
"""
Tests column conversion functionality during parsing
for all of the parsers defined in parsers.py
"""
import numpy as np
import pytest
from pandas.compat import StringIO, lmap, parse_date
import pandas as pd
from pandas import DataFrame, Index
import pandas.util.testing as tm
def test_converters_type_must_be_dict(all_parsers):
parser = all_parsers
data = """index,A,B,C,D
foo,2,3,4,5
"""
with pytest.raises(TypeError, match="Type converters.+"):
parser.read_csv(StringIO(data), converters=0)
@pytest.mark.parametrize("column", [3, "D"])
@pytest.mark.parametrize("converter", [
parse_date,
lambda x: int(x.split("/")[2]) # Produce integer.
])
def test_converters(all_parsers, column, converter):
parser = all_parsers
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
result = parser.read_csv(StringIO(data), converters={column: converter})
expected = parser.read_csv(StringIO(data))
expected["D"] = expected["D"].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(all_parsers):
# see gh-2184
parser = all_parsers
data = """000102,1.2,A\n001245,2,B"""
converters = {0: lambda x: x.strip()}
result = parser.read_csv(StringIO(data), header=None,
converters=converters)
# Column 0 should not be casted to numeric and should remain as object.
expected = DataFrame([["000102", 1.2, "A"], ["001245", 2, "B"]])
tm.assert_frame_equal(result, expected)
def test_converters_euro_decimal_format(all_parsers):
# see gh-583
converters = dict()
parser = all_parsers
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,7387
2;121,12;14897,76;DEF;uyt;0,3773
3;878,158;108013,434;GHI;rez;2,7356"""
converters["Number1"] = converters["Number2"] =\
converters["Number3"] = lambda x: float(x.replace(",", "."))
result = parser.read_csv(StringIO(data), sep=";", converters=converters)
expected = DataFrame([[1, 1521.1541, 187101.9543, "ABC", "poi", 4.7387],
[2, 121.12, 14897.76, "DEF", "uyt", 0.3773],
[3, 878.158, 108013.434, "GHI", "rez", 2.7356]],
columns=["Id", "Number1", "Number2",
"Text1", "Text2", "Number3"])
tm.assert_frame_equal(result, expected)
def test_converters_corner_with_nans(all_parsers):
parser = all_parsers
data = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
# Example converters.
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith("+")
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith("+")
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find("-") > 0:
val_min, val_max = lmap(int, x.split("-"))
val = 0.5 * (val_min + val_max)
else:
val = float(x)
return val
results = []
for day_converter in [convert_days, convert_days_sentinel]:
result = parser.read_csv(StringIO(data),
converters={"score": convert_score,
"days": day_converter},
na_values=["", None])
assert pd.isna(result["days"][1])
results.append(result)
tm.assert_frame_equal(results[0], results[1])
def test_converter_index_col_bug(all_parsers):
# see gh-1835
parser = all_parsers
data = "A;B\n1;2\n3;4"
rs = parser.read_csv(StringIO(data), sep=";", index_col="A",
converters={"A": lambda x: x})
xp = DataFrame({"B": [2, 4]}, index=Index([1, 3], name="A"))
tm.assert_frame_equal(rs, xp)
| bsd-3-clause |
aburgasser/splat | splat/utilities.py | 1 | 100364 | # -*- coding: utf-8 -*-
from __future__ import print_function
"""
.. note::
These are the utility functions for SPLAT
"""
# imports: internal
import base64
import copy
import os
import re
import requests
import string
import sys
# imports - external
import astropy
from astropy.coordinates import Angle,SkyCoord,EarthLocation # coordinate conversion
from astropy import units as u # standard units
from astropy.time import Time # standard units
import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.patheffects
import numpy
from scipy import stats
from scipy.interpolate import interp1d,InterpolatedUnivariateSpline
from scipy.integrate import trapz
# code constants
import splat
from splat.initialize import *
# Python 2->3 fix for input
try: input=raw_input
except NameError: pass
# change the command prompt
sys.ps1 = 'splat util> '
#####################################################
########### SIMPLE HELPER FUNCTIONS #############
#####################################################
def isNumber(s):
'''
:Purpose: Checks if something is a number.
:param s: object to be checked
:type s: required
:Output: True or False
:Example:
>>> import splat
>>> print splat.isNumber(3)
True
>>> print splat.isNumber('hello')
False
'''
s1 = copy.deepcopy(s)
if isinstance(s1,bool): return False
if isinstance(s1,u.quantity.Quantity): s1 = s1.value
if isinstance(s1,float): return (True and not numpy.isnan(s1))
if isinstance(s1,int): return (True and not numpy.isnan(s1))
try:
s1 = float(s1)
return (True and not numpy.isnan(s1))
except ValueError:
return False
def isUnit(s):
'''
:Purpose:
Checks if something is an astropy unit quantity; written in response to the
many ways that astropy now codes unit quantities
:Required Inputs:
:param s: quantity to be checked
:Optional Inputs:
None
:Output:
True or False
:Example:
>>> import splat
>>> import astropy.units as u
>>> print splat.isUnit(3)
False
>>> print splat.isUnit(3.*u.s)
True
>>> print splat.isUnit(3.*u.s/u.s)
True
>>> print splat.isUnit((3.*u.s/u.s).value)
False
'''
return isinstance(s,u.quantity.Quantity) or \
isinstance(s,u.core.Unit) or \
isinstance(s,u.core.CompositeUnit) or \
isinstance(s,u.core.IrreducibleUnit) or \
isinstance(s,u.core.NamedUnit) or \
isinstance(s,u.core.PrefixUnit)
def numberList(numstr,sort=False):
'''
:Purpose:
Convert a string listing of numbers into an array of numbers
:Required Input:
:param **numstr**: string indicating number list, e.g., '45,50-67,69,72-90'
:Optional Input:
:param **sort**: set to True to sort output list (default = False)
:Output:
list of integers specified by string
:Example:
>>> import splat
>>> a = splat.numberList('45,50-67,69,72-90')
>>> print(a)
[45, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 69,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90]
'''
# check inputs
if not isinstance(numstr,str): raise ValueError('\nInput to numberList {} must be a string'.format(numstr))
numlist = []
tmp1 = numstr.replace(' ','')
tmp2 = tmp1.split(',')
for a in tmp2:
tmp3 = a.split(';')
for b in tmp3:
tmp4 = b.split('-')
if len(tmp4) > 1:
numlist.extend(list(range(int(tmp4[0]),int(tmp4[1])+1)))
else:
numlist.append(int(tmp4[0]))
if sort==True: numlist = sorted(numlist)
return numlist
def padWhereArray(w,mx):
'''
Purpose:
Pads the output of a numpy.where array to select (if available) one more index spot beyond limits
'''
if w[0][0] > 0: w = (numpy.insert(w[0],0,w[0][0]-1),)
if w[0][-1] < mx: w = (numpy.append(w[0],w[0][-1]+1),)
return w
def readDictFromFile(file,delim='\t',missing_value=None,data_type=[str],verbose=False,**kwargs):
'''
:Purpose:
Reads a simple text file into a series of key: value pairs and placed into a dictionary;
allows for assignment of variables to arrays
:Required Inputs:
:param: file: string containing full path to file to be read in; this file should be an ascii file with simple delimiters
:Optional Inputs:
:param: delim: delimiter to separate keys from values
:param: value_delim: delimiter to separate values; if not provided, defaults to ``delim``
:param: data_type: single or list of data type to apply to input data; must be str, int, float or complex
:param: missing_value: variable to replace missing values (keys without data)
:param: verbose: set to True to provide verbose feedback
:Outputs:
A dictionary of input file parameters
:Example:
Assume you have a data file of format:
this 5
that 5,6,7,8
other
>>> import splat
>>> readDictFromFile('input.txt',delim=' ',value_delim=',',data_type=[int,float])
        {'this': 5, 'that': [5.0, 6.0, 7.0, 8.0], 'other': None}
'''
list_delim = kwargs.get('list_delim',delim)
list_delim = kwargs.get('value_delim',list_delim)
if os.path.exists(file) == False:
raise ValueError('\nFile {} cannot be found'.format(file))
try:
with open(file) as f: dat = f.read()
dat = dat.split('\n')
except:
raise ValueError('\nUnable to read in file {} as simple ascii file'.format(file))
if len(dat) == 0:
if verbose == True: print('\nNo data found in file {}'.format(file))
return {}
if len(dat[0].split(delim)) < 2:
        if verbose == True: print('\nWarning: delimiter {} not found in first line of file {}'.format(delim,file))
# data types
try:
dtype = list(data_type)
except:
dtype = copy.deepcopy(data_type)
if not isinstance(dtype,list): dtype = [dtype]
# if verbose == True: print('\nWarning: could not intepret data type input {}, converting all to strings'.format(data_type))
while len(dtype) < len(dat): dtype.append(dtype[-1])
# separate and convert
output = {}
for i,line in enumerate(dat):
if line != '':
sp = line.split(delim)
ky = sp[0]
if len(sp) > 1:
val = sp[1:]
if list_delim != delim: val = sp[1].split(list_delim)
d = dtype[i]
if d not in [str,int,float,complex]: d = str
cval = []
for v in val:
try: cval.append(d(v))
except: pass
if len(cval) == 1: cval = cval[0]
else: cval = missing_value
output[ky] = cval
return output
def writeDictToFile(data,file,delim='\t',verbose=False,**kwargs):
'''
:Purpose:
Writes the contents of a dictionary to a simple ascii file into a series of key value pairs;
allows for writing of both individual variables and lists (but not nested dictionaries)
:Required Inputs:
:param: data: dictionary to be written out; cannot be a nested dictionary but can contain lists
:param: file: string containing full path to file to be written
:Optional Inputs:
:param: delim: delimiter to separate keys from values
:param: value_delim: delimiter to separate values; if not provided, defaults to ``delim``
:param: verbose: set to True to provide verbose feedback
:Outputs:
An output file
:Example:
>>> import splat
>>> d = {'this': 5., 'that': [4,6,8], 'other': 'something else'}
>>> writeDictToFile(d,'/Users/adam//Desktop/temp2.txt',delim='\t',value_delim=',')
True
Contents of file will be:
this 5.0
that 4,6,8
other something else
'''
value_delim = kwargs.get('value_delim',delim)
value_delim = kwargs.get('list_delim',value_delim)
if isinstance(data,dict) == False:
raise ValueError('\nInput data is not a dictionary'.format(file))
try:
f = open(file,'w')
except:
raise ValueError('\nCould not open file {} for writing'.format(file))
for k in list(data.keys()):
line = '{}{}'.format(k,delim)
val = data[k]
if isinstance(val,str): val = [val]
try:
val = list(val)
except:
val = [val]
line = line+'{}'.format(val[0])
if len(val) > 1:
for v in val[1:]: line = line+'{}{}'.format(value_delim,v)
f.write(line+'\n')
f.close()
return True
def directoryTree(folder,verbose=True):
'''
:Purpose:
Finds the lowest level directories within a given folder and returns the full paths for these
:Required Inputs:
:param: folder: directory to search
:Optional Inputs:
:param: verbose: set to True to provide verbose feedback
:Outputs:
A list of directory paths
:Example:
>>> import splat
>>> directoryTree(splat.LIBRARY_PUBLIC_FOLDER)
['/Users/adam/projects/splat/code/splat//resources/Data/Public/MAGE/',
'/Users/adam/projects/splat/code/splat//resources/Data/Public/SPEX-PRISM/',
'/Users/adam/projects/splat/code/splat//resources/Data/Public/LRIS-RED/']
'''
paths = []
if os.path.exists(folder)==False:
if verbose==True: print('Warning: folder {} cannot be found'.format(folder))
else:
for p,d,r in os.walk(folder):
if not d: paths.append(p+'/')
return paths
#####################################################
################ VARIOUS CHECKS #################
#####################################################
def checkFile(filename,**kwargs):
'''
:Purpose: Checks if a spectrum file exists in the SPLAT's library.
:param filename: A string containing the spectrum's filename.
:Example:
>>> import splat
>>> spectrum1 = 'spex_prism_1315+2334_110404.fits'
>>> print spl.checkFile(spectrum1)
True
>>> spectrum2 = 'fake_name.fits'
>>> print spl.checkFile(spectrum2)
False
'''
url = kwargs.get('url',SPLAT_URL)+DATA_FOLDER
return requests.get(url+filename).status_code == requests.codes.ok
def checkAccess(**kwargs):
'''
:Purpose: Checks if user has access to unpublished spectra in SPLAT library.
:Example:
>>> import splat
>>> print spl.checkAccess()
True
:Note: Must have the file .splat_access in your home directory with the correct passcode to use.
'''
result = False
try:
home = os.path.expanduser("~")
if home == None: home = './'
bcode = requests.get(SPLAT_URL+ACCESS_FILE).content
lcode = base64.b64encode(open(home+'/'+ACCESS_FILE,'r').read().encode())
if (bcode[:-3] in lcode): result = True
except:
result = False
if (kwargs.get('verbose',False) == True):
if result == True:
print('You have full access to all SPLAT data')
else:
print('You have access only to published data')
return result
def checkLocal(inputfile):
'''
:Purpose: Checks if a file is present locally or within the SPLAT
code directory
:Example:
>>> import splat
>>> spl.checkLocal('spl.py')
True # found the code
>>> spl.checkLocal('parameters.txt')
False # can't find this file
>>> spl.checkLocal('SpectralModels/BTSettl08/parameters.txt')
True # found it
'''
if not os.path.exists(os.path.normpath(inputfile)):
if not os.path.exists(os.path.normpath(SPLAT_PATH+inputfile)):
return ''
else:
return SPLAT_PATH+inputfile
else:
return inputfile
def checkOnline(*args):
'''
:Purpose: Checks if SPLAT's URL is accessible from your machine--
that is, checks if you and the host are online. Alternately
checks if a given filename is present locally or online
:Example:
>>> import splat
>>> spl.checkOnline()
True # SPLAT's URL was detected.
>>> spl.checkOnline()
False # SPLAT's URL was not detected.
>>> spl.checkOnline('SpectralModels/BTSettl08/parameters.txt')
'' # Could not find this online file.
'''
output = False
if len(args) != 0:
if 'http://' in args[0]:
try:
if requests.get(args[0]).status_code == requests.codes.ok:
output = args[0]
except:
pass
else:
try:
if requests.get(SPLAT_URL+args[0]).status_code == requests.codes.ok:
output = SPLAT_URL+args[0]
except:
pass
else:
try:
output = requests.get(SPLAT_URL).status_code == requests.codes.ok
except:
pass
return output
def checkOnlineFile(*args):
'''
:Purpose: Checks if SPLAT's URL is accessible from your machine--
that is, checks if you and the host are online. Alternately
checks if a given filename is present locally or online
:Example:
>>> import splat
>>> spl.checkOnlineFile('SpectralModels/BTSettl08/parameters.txt')
'' # Could not find this online file.
>>> spl.checkOnlineFile()
'' # SPLAT's URL was not detected; you are not online.
'''
if (len(args) != 0):
if 'http://' in args[0]:
if requests.get(args[0]).status_code == requests.codes.ok:
return args[0]
return ''
else:
if requests.get(SPLAT_URL+args[0]).status_code == requests.codes.ok:
return SPLAT_URL+args[0]
return ''
else:
return requests.get(SPLAT_URL).status_code == requests.codes.ok
def checkDict(ref,refdict,altref='altname',replace=[],verbose=False):
'''
Purpose:
General usage program to check if a key is present in a dictionary, with the option to look through alternate names
Required Inputs:
        :param ref: A string containing the reference to be checked, which should be among the keys or alternate names in refdict
:param refdict: dictionary containing empirical relation information
Optional Inputs:
None
Output:
A string containing SPLAT's default name for a given reference set, or False if that reference is not present
Example:
>>> import splat
>>> print(splat.checkDict('filippazzo',splat.SPT_LBOL_RELATIONS))
filippazzo2015
>>> print(splat.checkDict('burgasser',splat.SPT_BC_RELATIONS))
False
'''
output = False
refc = copy.deepcopy(ref)
# check reference
if not isinstance(refc,str):
return output
if len(replace) > 0:
for rep in replace:
            if isinstance(rep,list) == True and len(rep) > 1: refc = refc.replace(rep[0],rep[1])
for k in list(refdict.keys()):
if refc.lower()==k.lower(): output = k
if altref in list(refdict[k].keys()):
if refc.lower() in [x.lower() for x in list(refdict[k][altref])]: output = k
if output == False:
if verbose: print('\nCould not find item {} in input dictionary; try: {}'.format(ref,list(refdict.keys())))
return output
def checkEmpiricalRelation(ref,refdict,verbose=False):
'''
Purpose:
General checking program for empirical relation dictionaries
Required Inputs:
        :param ref: A string containing the reference for the empirical relation, which should be among the keys or alternate names in refdict
:param refdict: dictionary containing empirical relation information
Optional Inputs:
None
Output:
A string containing SPLAT's default name for a given reference set, or False if that reference is not present
Example:
>>> import splat
>>> print(splat.checkEmpiricalRelation('filippazzo',splat.SPT_LBOL_RELATIONS))
filippazzo2015
>>> print(splat.checkEmpiricalRelation('burgasser',splat.SPT_BC_RELATIONS))
False
'''
output = False
# check reference
if not isinstance(ref,str):
return output
for k in list(refdict.keys()):
if ref.lower()==k.lower() or ref.lower() in refdict[k]['altname']:
output = k
if output == False:
if verbose: print('\nReference {} is not among those present in the reference dictionary; try: {}'.format(ref,list(refdict.keys())))
return output
def checkInstrument(instrument):
'''
Purpose:
Checks that an instrument name is one of the available instruments, including a check of alternate names
Required Inputs:
:param: instrument: A string containing the instrument name to be checked. This should be one of the instruments in the global parameter splat.initialize.INSTRUMENTS
Optional Inputs:
None
Output:
A string containing SPLAT's default name for a given instrument, or False if that instrument is not present
Example:
>>> import splat
>>> splat.checkInstrument('SPEX PRISM')
SPEX-PRISM
>>> splat.checkInstrument('LRIS')
LRIS-RED
>>> splat.checkInstrument('somethingelse')
False
'''
return checkDict(instrument,INSTRUMENTS,replace=[['_','-'],[' ','-']])
# output = False
# if not isinstance(instrument,str):
# return output
# for k in list(INSTRUMENTS.keys()):
# if instrument.upper()==k.upper() or instrument.upper().replace(' ','_').replace('_','-')==k.upper() or instrument.upper() in [a.upper() for a in INSTRUMENTS[k]['altname']]:
# output = k
# return output
def checkFilterName(f,verbose=False):
'''
Purpose:
Checks that an input filter name is one of the available filters, including a check of alternate names
Required Inputs:
:param: filter: A string containing the filter name to be checked. This should be one of the names listed in `splat.FILTERS.keys()` or name alternates
Optional Inputs:
None
Output:
A string containing SPLAT's default name for a given filter, or False if that filter is not present
Example:
>>> import splat
>>> print(splat.checkFilterName('2MASS_KS'))
2MASS_KS
>>> print(splat.checkFilterName('2mass k'))
2MASS_KS
>>> print(splat.checkFilterName('somethingelse'))
False
'''
output = False
if not isinstance(f,str):
return output
for k in list(FILTERS.keys()):
if f.lower().replace(' ','_').replace('-','_') == k.lower() or f.lower().replace(' ','_') in [x.lower() for x in FILTERS[k]['altname']]:
output = k
if verbose==True and output==False:
print('\nSPLAT does not contain the filter {}'.format(f))
return output
def checkSpectralModelName(model):
'''
Purpose:
Checks that an input model name is one of the available spectral models, including a check of alternate names
Required Inputs:
:param: model: A string containing the spectral model to be checked. This should be one of the models listed in `loadModel()`_
.. _`loadModel()` : api.html#splat_model.loadModel
Optional Inputs:
None
Output:
A string containing SPLAT's default name for a given model set, or False if that model set is not present
Example:
>>> import splat
>>> print(splat.checkSpectralModelName('burrows'))
burrows06
>>> print(splat.checkSpectralModelName('allard'))
BTSettl2008
>>> print(splat.checkSpectralModelName('somethingelse'))
False
'''
return checkDict(model,SPECTRAL_MODELS)
# output = False
# if not isinstance(model,str):
# return output
# for k in list(SPECTRAL_MODELS.keys()):
# if model.lower()==k.lower() or model.lower() in SPECTRAL_MODELS[k]['altname']:
# output = k
# return output
def checkEvolutionaryModelName(model):
'''
Purpose:
Checks that an input model name is one of the available evolutionary models, including a check of alternate names
Required Inputs:
:param: model: A string containing the evolutionary model to be checked. This should be one of the models listed in splat.EVOLUTIONARY_MODELS.keys()
Optional Inputs:
None
Output:
A string containing SPLAT's default name for a given model set, or False if that model set is not present
Example:
>>> import splat
>>> print(splat.checkEvolutionaryModelName('burrows'))
burrows01
>>> print(splat.checkEvolutionaryModelName('allard'))
False
'''
output = False
if not isinstance(model,str):
return output
for k in list(EVOLUTIONARY_MODELS.keys()):
if model.lower()==k.lower() or model.lower() in EVOLUTIONARY_MODELS[k]['altname']:
output = k
return output
def checkAbsMag(ref,filt='',verbose=False):
'''
Purpose:
Checks that an input reference name and filter are among the available sets for `typeToMag()`_,
including a check of alternate names
.. _`typeToMag()` : TMP
Required Inputs:
:param ref: A string containing the reference for absolute magnitude relation,
among the keys and alternate names in splat.SPT_ABSMAG_RELATIONS
Optional Inputs:
:param filt: A string containing the filter name, to optionally check if this filter is among those defined in the reference set
Output:
A string containing SPLAT's default name for a given reference set, or False if that reference is not present
Example:
>>> import splat
        >>> print(splat.checkAbsMag('filippazzo','2MASS J'))
            filippazzo2015
        >>> print(splat.checkAbsMag('allard','2MASS J'))
            False
'''
output = False
# check reference
if not isinstance(ref,str):
return output
for k in list(SPT_ABSMAG_RELATIONS.keys()):
if ref.lower()==k.lower() or ref.lower() in SPT_ABSMAG_RELATIONS[k]['altname']:
output = k
if output == False:
if verbose: print('\nReference {} is not among those used in SPLAT; try: {}'.format(ref,list(SPT_ABSMAG_RELATIONS.keys())))
return output
# check filter
if filt != '':
filt = checkFilterName(filt)
if filt == False:
if verbose: print('\nFilter {} is not among the filters used in SPLAT; try: {}'.format(filt,list(FILTERS.keys())))
return False
if filt not in list(SPT_ABSMAG_RELATIONS[output]['filters'].keys()):
            if verbose: print('\nFilter {} is not among the filters defined for the {} absolute magnitude relation; try: {}'.format(filt,output,list(SPT_ABSMAG_RELATIONS[output]['filters'].keys())))
return False
return output
def checkBC(ref,filt='',verbose=False):
'''
Purpose:
Checks that an input reference name and filter are among the available sets for `typeToBC()`_,
including a check of alternate names
.. _`typeToBC()` : TMP
Required Inputs:
        :param ref: A string containing the reference for the bolometric correction relation,
among the keys and alternate names in splat.SPT_BC_RELATIONS
Optional Inputs:
:param filt: A string containing the filter name, to optionally check if this filter is among those defined in the reference set
Output:
A string containing SPLAT's default name for a given reference set, or False if that reference is not present
Example:
>>> import splat
>>> print(splat.checkBC('filippazzo','2MASS J'))
filippazzo2015
>>> print(splat.checkBC('dupuy','2MASS J'))
False
'''
output = False
# check reference
if not isinstance(ref,str):
return output
for k in list(SPT_BC_RELATIONS.keys()):
if ref.lower()==k.lower() or ref.lower() in SPT_BC_RELATIONS[k]['altname']:
output = k
if output == False:
if verbose: print('\nReference {} is not among those used in SPLAT; try: {}'.format(ref,list(SPT_BC_RELATIONS.keys())))
return output
# check filter
if filt != '':
filt = checkFilterName(filt)
if filt == False:
if verbose: print('\nFilter {} is not among the filters used in SPLAT; try: {}'.format(filt,list(FILTERS.keys())))
return False
if filt not in list(SPT_BC_RELATIONS[output]['filters'].keys()):
            if verbose: print('\nFilter {} is not among the filters defined for the {} bolometric correction relation; try: {}'.format(filt,output,list(SPT_BC_RELATIONS[output]['filters'].keys())))
return False
return output
def checkLbol(ref,verbose=False):
'''
Purpose:
Checks that an input reference name are among the available sets for `typeToLuminosity()`_,
including a check of alternate names
.. _`typeToLuminosity()` : TMP
Required Inputs:
        :param ref: A string containing the reference for the luminosity/SpT relation,
among the keys and alternate names in splat.SPT_LBOL_RELATIONS
Optional Inputs:
None
Output:
A string containing SPLAT's default name for a given reference set, or False if that reference is not present
Example:
>>> import splat
>>> print(splat.checkLbol('filippazzo'))
filippazzo2015
        >>> print(splat.checkLbol('burgasser'))
False
'''
output = False
# check reference
if not isinstance(ref,str):
return output
for k in list(SPT_LBOL_RELATIONS.keys()):
if ref.lower()==k.lower() or ref.lower() in SPT_LBOL_RELATIONS[k]['altname']:
output = k
if output == False:
if verbose: print('\nReference {} is not among those used in SPLAT; try: {}'.format(ref,list(SPT_LBOL_RELATIONS.keys())))
return output
return output
def checkTelescope(location):
'''
Purpose:
        Checks that a location name is one of the telescopes listed in splat.initialize.TELESCOPES, including a check of alternate names
Required Inputs:
:param: location: A string containing the telescope/site name to be checked. This should be one of the locations in the global parameter splat.initialize.TELESCOPES
Optional Inputs:
None
Output:
        A string containing SPLAT's default name for a given telescope, or False if that telescope is not present
Example:
>>> import splat
>>> print(splat.checkTelescope('keck'))
KECK
>>> print(splat.checkTelescope('mauna kea'))
KECK
>>> print(splat.checkTelescope('somethingelse'))
False
'''
output = False
if not isinstance(location,str):
return output
for k in list(TELESCOPES.keys()):
if location.upper().replace(' ','_').replace('-','_')==k.upper() or location.upper().replace(' ','_').replace('-','_') in [a.upper() for a in TELESCOPES[k]['altname']]:
output = k
return output
def checkLocation(location):
'''
Purpose:
Duplicate of checkTelescope()
'''
return checkTelescope(location)
#####################################################
############## SIMPLE CONVERSIONS ###############
#####################################################
#def caldateToDate(d):
'''
:Purpose: Convert from numeric date to calendar date, and vice-versa.
:param d: A numeric date of the format '20050412', or a date in the
calendar format '2005 Jun 12'
:Example:
>>> import splat
>>> caldate = splat.dateToCaldate('20050612')
>>> print caldate
2005 Jun 12
>>> date = splat.caldateToDate('2005 June 12')
>>> print date
20050612
'''
# return properDate(d,output='YYYY MMM DD')
#def dateToCaldate(d):
'''
:Purpose: Converts numeric date to calendar date
:param date: String in the form 'YYYYMMDD'
:type date: required
:Output: Date in format YYYY MMM DD
:Example:
>>> import splat
>>> splat.dateToCaldate('19940523')
1994 May 23
'''
# d1 = copy.deepcopy(d)
# if isNumber(d1): d1 = str(d1)
# return d1[:4]+' '+MONTHS[int(d1[5:6])-1]+' '+d1[-2:]
def properDate(din,**kwargs):
'''
:Purpose: Converts various date formats into a standardized date of YYYY-MM-DD
:param d: Date to be converted.
:param format: Optional input format of the following form:
* 'YYYY-MM-DD': e.g., 2011-04-03 (this is default output)
* 'YYYYMMDD': e.g., 20110403
        * 'YYMMDD': e.g., 110403
* 'MM/DD/YY': e.g., 03/04/11
* 'MM/DD/YYYY': e.g., 03/04/2011
* 'YYYY/MM/DD': e.g., 2011/03/04
* 'DD/MM/YYYY': e.g., 04/03/2011
* 'DD MMM YYYY': e.g., 04 Mar 2011
* 'YYYY MMM DD': e.g., 2011 Mar 04
:type format: Optional, string
:param output: Format of the output based on the prior list
:type output: Optional, string
:Example:
>>> import splat
>>> splat.properDate('20030502')
'2003-05-02'
>>> splat.properDate('2003/05/02')
'02-2003-05'
>>> splat.properDate('2003/05/02',format='YYYY/MM/DD')
'2003-05-02'
>>> splat.properDate('2003/05/02',format='YYYY/MM/DD',output='YYYY MMM DD')
'2003 May 02'
Note that the default output format can be read into an astropy.time quantity
>>> import splat
>>> from astropy.time import Time
>>> t = Time(splat.properDate('20030502'))
>>> print(t)
2003-05-02 00:00:00.000
'''
dformat = kwargs.get('format','')
oformat = kwargs.get('output','YYYY-MM-DD')
if len(din)==0:
print('\nCould not determine format of input date {}; please provide a format string\n'.format(din))
return ''
d = copy.deepcopy(din)
if not isinstance(d,str): d = str(int(d))
# some defaults
if '/' in d and dformat == '': # default American style
if len(d) <= 8:
dformat = 'MM/DD/YY'
else:
dformat = 'MM/DD/YYYY'
if True in [c.lower() in d.lower() for c in MONTHS] and dformat == '':
if isNumber(d.replace(' ','')[3]):
dformat = 'YYYY MMM DD'
else:
dformat = 'DD MMM YYYY'
if 'T' in d and dformat == '': # default American style
d = d.split('T')[0]
if isNumber(d) and dformat == '':
if len(str(d)) <= 6:
dformat = 'YYMMDD'
else:
dformat = 'YYYYMMDD'
# no idea
if dformat == '':
print('\nCould not determine format of input date {}; please provide a format string\n'.format(din))
return ''
# case statement for conversion to YYYY-MM-DD
if dformat == 'YYYYMMDD':
dp = d[:4]+'-'+d[4:6]+'-'+d[-2:]
elif dformat == 'YYMMDD':
if int(d[:2]) > 50:
dp = '19'+d[:2]+'-'+d[2:4]+'-'+d[-2:]
else:
dp = '20'+d[:2]+'-'+d[2:4]+'-'+d[-2:]
elif dformat == 'MM/DD/YYYY':
tmp = d.split('/')
if len(tmp[0]) == 1:
tmp[0] = '0'+tmp[0]
if len(tmp[1]) == 1:
tmp[1] = '0'+tmp[1]
dp = tmp[2]+'-'+tmp[0]+'-'+tmp[1]
elif dformat == 'MM/DD/YY':
tmp = d.split('/')
if len(tmp[0]) == 1:
tmp[0] = '0'+tmp[0]
if len(tmp[1]) == 1:
tmp[1] = '0'+tmp[1]
if int(tmp[2]) > 50:
dp = '19'+tmp[2]+'-'+tmp[0]+'-'+tmp[1]
else:
dp = '20'+tmp[2]+'-'+tmp[0]+'-'+tmp[1]
elif dformat == 'YYYY/MM/DD':
tmp = d.split('/')
if len(tmp[2]) == 1:
tmp[2] = '0'+tmp[2]
if len(tmp[1]) == 1:
tmp[1] = '0'+tmp[1]
dp = tmp[0]+'-'+tmp[1]+'-'+tmp[2]
elif dformat == 'DD/MM/YYYY':
tmp = d.split('/')
if len(tmp[0]) == 1:
tmp[0] = '0'+tmp[0]
if len(tmp[1]) == 1:
tmp[1] = '0'+tmp[1]
dp = tmp[2]+'-'+tmp[1]+'-'+tmp[0]
elif dformat == 'DD/MM/YY':
tmp = d.split('/')
if len(tmp[0]) == 1:
tmp[0] = '0'+tmp[0]
if len(tmp[1]) == 1:
tmp[1] = '0'+tmp[1]
if int(tmp[2]) > 50:
dp = '19'+tmp[2]+'-'+tmp[1]+'-'+tmp[0]
else:
dp = '20'+tmp[2]+'-'+tmp[1]+'-'+tmp[0]
elif dformat == 'DD MMM YYYY':
tmp = d.split(' ')
if len(tmp[0]) == 1:
tmp[0] = '0'+tmp[0]
for i,c in enumerate(MONTHS):
if c.lower() == tmp[1].lower():
mref = str(i+1)
if len(mref) == 1:
mref = '0'+mref
dp = tmp[2]+'-'+mref+'-'+tmp[0]
elif dformat == 'DD-MMM-YYYY':
tmp = d.split(' ')
if len(tmp[0]) == 1:
tmp[0] = '0'+tmp[0]
for i,c in enumerate(MONTHS):
if c.lower() == tmp[1].lower():
mref = str(i+1)
if len(mref) == 1:
mref = '0'+mref
dp = tmp[2]+'-'+mref+'-'+tmp[0]
elif dformat == 'YYYY MMM DD':
tmp = d.split(' ')
if len(tmp[2]) == 1:
tmp[2] = '0'+tmp[2]
for i,c in enumerate(MONTHS):
if c.lower() == tmp[1].lower():
mref = str(i+1)
if len(mref) == 1:
mref = '0'+mref
dp = tmp[0]+'-'+mref+'-'+tmp[2]
elif dformat == 'YYYY-MMM-DD':
tmp = d.split(' ')
if len(tmp[2]) == 1:
tmp[2] = '0'+tmp[2]
for i,c in enumerate(MONTHS):
if c.lower() == tmp[1].lower():
mref = str(i+1)
if len(mref) == 1:
mref = '0'+mref
dp = tmp[0]+'-'+mref+'-'+tmp[2]
else:
dp = d
# case statement for conversion from YYYY-MM-DD to desired output format
if oformat == 'YYYYMMDD':
df = dp.replace('-','')
elif oformat == 'YYMMDD':
df = dp.replace('-','')[2:]
elif oformat == 'MM/DD/YYYY':
tmp = dp.split('-')
df = tmp[1]+'/'+tmp[2]+'/'+tmp[0]
elif oformat == 'MM/DD/YY':
tmp = dp.split('-')
df = tmp[1]+'/'+tmp[2]+'/'+tmp[0][2:]
elif oformat == 'YYYY/MM/DD':
tmp = dp.split('-')
df = tmp[0]+'/'+tmp[1]+'/'+tmp[2]
elif oformat == 'DD/MM/YYYY':
tmp = dp.split('-')
df = tmp[2]+'/'+tmp[1]+'/'+tmp[0]
elif oformat == 'DD/MM/YY':
tmp = dp.split('-')
df = tmp[2]+'/'+tmp[1]+'/'+tmp[0][2:]
elif oformat == 'DD MMM YYYY':
tmp = dp.split('-')
df = tmp[2]+' '+MONTHS[int(tmp[1])-1]+' '+tmp[0]
elif oformat == 'DD-MMM-YYYY':
tmp = dp.split('-')
df = tmp[2]+'-'+MONTHS[int(tmp[1])-1]+'-'+tmp[0]
elif oformat == 'YYYY MMM DD':
tmp = dp.split('-')
df = tmp[0]+' '+MONTHS[int(tmp[1])-1]+' '+tmp[2]
elif oformat == 'YYYY-MMM-DD':
tmp = dp.split('-')
df = tmp[0]+'-'+MONTHS[int(tmp[1])-1]+'-'+tmp[2]
else:
df = dp
return df
def checkKeys(input,parameters,**kwargs):
'''
:Purpose: Checks the input kwargs keys against the expected parameters of a function to make sure the right parameters are passed.
:param input: input dictionary to a function (i.e., kwargs).
:param parameters: allowed parameters for the function
    :param forcekey: (optional, default = False) if True, raises a ValueError if an incorrect parameter is passed
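    :Example (illustrative sketch; `myfunc` is a hypothetical function):
        >>> import splat
        >>> def myfunc(**kwargs):
        ...     splat.checkKeys(kwargs, ['norm', 'scale'])
        >>> myfunc(norm=1., color='red')
        Parameter Warning!
        Unknown input keyword color
        Valid keywords are ['norm', 'scale']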
'''
kflag = False
forcekey = kwargs.get('forcekey',False)
for k in input.keys():
if k not in parameters:
print('\nParameter Warning!\nUnknown input keyword {}'.format(k))
kflag = True
if kflag:
if forcekey:
raise ValueError('Valid keywords are {}\n'.format(parameters))
else:
print('Valid keywords are {}\n'.format(parameters))
def coordinateToDesignation(c,prefix='J',sep='',split='',decimal=False):
'''
:Purpose: Converts right ascension and declination into a designation string
:param c: RA and Dec coordinate to be converted; can be a SkyCoord object with units of degrees,
a list with RA and Dec in degrees, or a string with RA measured in hour
angles and Dec in degrees
:Output: Designation string
:Example:
>>> import splat
>>> from astropy.coordinates import SkyCoord
>>> c = SkyCoord(238.86, 9.90, unit="deg")
>>> print splat.coordinateToDesignation(c)
J15552640+0954000
>>> print splat.coordinateToDesignation([238.86, 9.90])
J15552640+0954000
>>> print splat.coordinateToDesignation('15:55:26.4 +09:54:00.0')
J15552640+0954000
'''
# input is ICRS
# decreplace = ''
# if decimal==True: decreplace='.'
if isinstance(c,SkyCoord):
cc = copy.deepcopy(c)
else:
cc = properCoordinates(c)
# input is [RA,Dec] pair in degrees
output = '{}{}{}{}'.format(prefix, cc.ra.to_string(unit=u.hour, sep=sep, precision=2, pad=True), \
split , cc.dec.to_string(unit=u.degree, sep=sep, precision=1, alwayssign=True, pad=True))
if decimal==False: output = output.replace('.','')
# if sys.version_info.major == 2:
# return string.replace('{}{0}{}{1}'.format(prefix,cc.ra.to_string(unit=u.hour, sep=sep, precision=2, pad=True), \
# splitstr, cc.dec.to_string(unit=u.degree, sep=sep, precision=1, alwayssign=True, pad=True)),'.',decreplace)
# else:
# return str.replace('{}{0}{}{1}'.format(prefix,cc.ra.to_string(unit=u.hour, sep=sep, precision=2, pad=True), \
# splitstr, cc.dec.to_string(unit=u.degree, sep=sep, precision=1, alwayssign=True, pad=True)),'.',decreplace)
return output
def designationToCoordinate(value, **kwargs):
'''
:Purpose: Convert a designation string into a RA, Dec tuple or ICRS SkyCoord objects (default)
:param value: Designation string with RA measured in hour angles and Dec in degrees
:type value: required
:param icrs: returns astropy SkyCoord coordinate in ICRS frame if ``True``
    :type icrs: optional, default = True
:Output: Coordinate, either as [RA, Dec] or SkyCoord object
:Example:
>>> import splat
>>> splat.designationToCoordinate('J1555264+0954120')
<SkyCoord (ICRS): (ra, dec) in deg
(238.8585, 9.90333333)>
'''
icrsflag = kwargs.get('icrs',True)
a = re.sub('[j.:hms]','',value.lower())
fact = 1.
spl = a.split('+')
if len(spl) == 1:
spl = a.split('-')
fact = -1.
ra = 15.*float(spl[0][0:2])
if (len(spl[0]) > 2):
ra+=15.*float(spl[0][2:4])/60.
if (len(spl[0]) > 4):
ra+=15.*float(spl[0][4:6])/3600.
if (len(spl[0]) > 6):
ra+=15.*float(spl[0][6:8])/360000.
dec = float(spl[1][0:2])
if (len(spl[1]) > 2):
dec+=float(spl[1][2:4])/60.
if (len(spl[1]) > 4):
dec+=float(spl[1][4:6])/3600.
if (len(spl[1]) > 6):
dec+=float(spl[1][6:8])/360000.
dec*=fact
if icrsflag:
return SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')
else:
return [ra,dec]
def designationToCoordinateString(designation,delimiter=' ',radec_delimiter=' '):
'''
:Purpose:
Convert a designation string into a coordinate string with delimiters between hour, minute, second, etc.
:Required Inputs:
:param designation: designation, which should be a string of the form 'J12345678+01234567'
:Optional Inputs:
:param: delimiter = ' ': delimiter between coordinate elements
:param: radec_delimiter = ' ': delimiter between RA and declination substrings
:Output:
coordinate string of the form '12 34 56.78 +01 23 45.67' (depending on delimiters)
:Example:
>>> import splat
>>> splat.designationToCoordinateString('J1555264+0954120')
15 55 26.4 +09 54 12.0
>>> splat.designationToCoordinateString('J155526400+095412000',delimiter=':')
        15:55:26.400 +09:54:12.000
'''
    if not isinstance(designation,str): raise ValueError('Input variable must be a string')
d = designation.replace('J','').replace('j','').replace('.','')
dsym = '+'
tmp = d.split(dsym)
if len(tmp) != 2:
dsym = '-'
tmp = d.split(dsym)
if len(tmp) != 2: raise ValueError('problem processing designation string {}'.format(d))
output = tmp[0][0:2]+delimiter+tmp[0][2:4]+delimiter+tmp[0][4:6]
if len(tmp[0]) > 6: output = output+'.'+tmp[0][6:]
output = output+radec_delimiter+dsym+tmp[1][0:2]+delimiter+tmp[1][2:4]+delimiter+tmp[1][4:6]
if len(tmp[1]) > 6: output = output+'.'+tmp[1][6:]
return output
def designationToShortName(value):
'''
:Purpose: Produce a shortened version of designation
:param value: Designation string with RA measured in hour angles and Dec in degrees
:type value: required
:Output: Shorthand designation string
:Example:
>>> import splat
>>> print splat.designationToShortName('J1555264+0954120')
J1555+0954
'''
if isinstance(value,str):
a = re.sub('[j.:hms]','',value.lower())
mrk = '+'
spl = a.split(mrk)
if len(spl) == 1:
mrk = '-'
spl = a.split(mrk)
if len(spl) == 2:
return 'J'+spl[0][0:4]+mrk+spl[1][0:4]
else:
return value
else:
raise ValueError('\nMust provide a string value for designation\n\n')
def properCoordinates(c,frame='icrs',icrs=True,**kwargs):
'''
:Purpose: Converts various coordinate forms to the proper SkyCoord format. Convertible forms include lists and strings.
:param c: coordinate to be converted. Can be a list (ra, dec) or a string.
:Example:
>>> import splat
>>> print splat.properCoordinates([104.79, 25.06])
<SkyCoord (ICRS): ra=104.79 deg, dec=25.06 deg>
>>> print splat.properCoordinates('06:59:09.60 +25:03:36.0')
<SkyCoord (ICRS): ra=104.79 deg, dec=25.06 deg>
>>> print splat.properCoordinates('J06590960+2503360')
<SkyCoord (ICRS): ra=104.79 deg, dec=25.06 deg>
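        A distance keyword can also be attached to the output coordinate; if given without
        units it is assumed to be in parsecs (output shown schematically):
        >>> print splat.properCoordinates([104.79, 25.06], distance=10.)
        <SkyCoord (ICRS): ra=104.79 deg, dec=25.06 deg, distance=10.0 pc>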
'''
if isinstance(c,SkyCoord):
output = c
elif isinstance(c,list):
output = SkyCoord(c[0]*u.deg,c[1]*u.deg,frame=frame)
# input is sexigessimal string - assumed ICRS
elif isinstance(c,str):
if c[0] == 'J':
output = designationToCoordinate(c,**kwargs)
else:
output = SkyCoord(c,frame='icrs', unit=(u.hourangle, u.deg))
else:
raise ValueError('\nCould not parse input format\n\n')
# add distance
if kwargs.get('distance',False) != False:
d = copy.deepcopy(kwargs['distance'])
if not isUnit(d): d=d*u.pc
d.to(u.pc)
output = SkyCoord(output,distance = d)
# except:
# print('\nWarning: could not integrate distance {} into coordinate'.format(distance))
# convert to icrs by default
if icrs == True: return output.icrs
else: return output
def typeToNum(inp, subclass='dwarf', error='', uncertainty=0., luminosity_class = '', metallicity_class='', age_class = '', color_class='', peculiar=False, verbose=False, **kwargs):
'''
:Purpose:
Converts between string and numeric spectral types, with the option of specifying the class prefix/suffix and uncertainty tags
:Required inputs:
:param inp: Spectral type to convert. Can convert a number or a string from 0.0 (K0) and 49.0 (Y9).
:Optional inputs:
:param: error = '': flag to indicate magnitude of classification uncertainty; by default ':' for uncertainty > 1 subtypes and '::' for uncertainty > 2 subtype added as suffix to string output. Can also use `err`.
:param: uncertainty = 0: numerical uncertainty of classification; can also use `unc`
:param: subclass = 'dwarf': spectral class; options include:
- *field* or *fld* or *alpha*: object is a field dwarf - no prefix/suffix to string output
- *sd* or *subdwarf*: object is a subdwarf - 'sd' prefix to string output
- *dsd* or *d/sd*: object is an intermediate subdwarf - 'd/sd' prefix to string output
- *esd*: object is an extreme subdwarf - 'esd' prefix to string output
- *usd*: object is an ultra subdwarf - 'usd' prefix to string output
- *delta*: object is a extremely low surface gravity dwarf (~1 Myr) - 'delta' suffix to string output
- *vlg* or *gamma* or *lowg*: object is a low surface gravity dwarf (~10 Myr) - 'gamma' suffix to string output
- *intg* or *beta*: object is an intermediate surface gravity dwarf (~100 Myr) - 'beta' suffix to string output
- *giant*: object is a giant with luminosity class III suffix added to string output
- *subgiant*: object is a subgiant with luminosity class IV suffix added to string output
- *supergiant*: object is a supergiant with luminosity class I suffix added to string output
        :param: metallicity_class = '': metallicity class of object, traditionally represented by 'sd','d/sd','esd','usd', and added on as prefix to string output. Can also use `z_class` or `metal_class`
:param: luminosity_class = '': luminosity class of object traditionally represented by roman numerals (e.g., 'III') and added on as suffix to string output. Can also use `lumclass`
:param: age_class = '': age class of object, traditionally one of 'alpha', 'beta', 'gamma', 'delta' and added on as suffix to string output (see subclass). Can also use 'ageclass'
:param: color_class: color class of object, traditionally 'b' (for blue) or 'r' (for red), added as prefix to string output. Can also use 'colorclass'
:param: peculiar = False: Set to True if object is peculiar, which adds a 'pec' suffix to string output
:param: verbose = False: Set to True to provide more feedback
:Outputs:
The number or string of a spectral type
:Example:
>>> import splat
>>> print splat.typeToNum(30)
T0.0
>>> print splat.typeToNum('T0.0')
30.0
>>> print splat.typeToNum(27, peculiar = True, uncertainty = 1.2, lumclass = 'II')
L7.0IIp:
        >>> print splat.typeToNum(50, verbose=True)
        Spectral type number must be between 0 (K0) and 49.0 (Y9)
        N/A
'''
# keywords
    error = kwargs.get('err',error)
uncertainty = kwargs.get('unc',uncertainty)
luminosity_class = kwargs.get('lumclass',luminosity_class)
metallicity_class = kwargs.get('z_class',metallicity_class)
metallicity_class = kwargs.get('metal_class',metallicity_class)
age_class = kwargs.get('ageclass',age_class)
    color_class = kwargs.get('colorclass',color_class)
    peculiar = kwargs.get('peculiar',peculiar)
spletter = 'KMLTY'
# as of 12/18/2017, this only works on individual inputs
if isinstance(inp,list):
raise ValueError('\nInput to typeToNum() must be a single element (string or number)')
# convert input into an array
# output = []
# var = copy.deepcopy(inp)
# if not isinstance(var,list): var = [var]
# if not isinstance(error,list): error = [error]
# if not isinstance(unc,list): unc = [unc]
# if not isinstance(subclass,list): subclass = [subclass]
# if not isinstance(lumclass,list): lumclass = [lumclass]
# if not isinstance(ageclass,list): ageclass = [ageclass]
# if not isinstance(colorclass,list): colorclass = [colorclass]
# if len(error) < len(var):
# for i in numpy.arange(len(var)-len(error)): error.append(error[-1])
# if len(unc) < len(var):
# for i in numpy.arange(len(var)-len(unc)): unc.append(unc[-1])
# number -> spectral type
if isNumber(inp):
# if len(subclass) < len(var):
# for i in numpy.arange(len(var)-len(subclass)): subclass.append(subclass[-1])
# if len(lumclass) < len(var):
# for i in numpy.arange(len(var)-len(lumclass)): lumclass.append(lumclass[-1])
# for i,l in enumerate(lumclass):
# if l != '': lumclass[i]=' '+lumclass[i]
# if len(ageclass) < len(var):
# for i in numpy.arange(len(var)-len(ageclass)): ageclass.append(ageclass[-1])
# if len(colorclass) < len(var):
# for i in numpy.arange(len(var)-len(colorclass)): colorclass.append(colorclass[-1])
spind = int(abs(inp/10.))
        if spind < 0 or spind >= len(spletter):
if verbose: print('Spectral type number must be between 0 ({}0) and {} ({}9)'.format(spletter[0],len(spletter)*10.-1.,spletter[-1]))
return 'N/A'
spdec = numpy.around(inp,1)-spind*10.
# deal with subclasses
if subclass.lower() == 'sd' or subclass.lower() == 'subdwarf': metallicity_class = 'sd'
if subclass.lower() == 'dsd' or subclass.lower() == 'd/sd': metallicity_class = 'd/sd'
if subclass.lower() == 'esd': metallicity_class = 'esd'
if subclass.lower() == 'usd': metallicity_class = 'usd'
if subclass.lower() == 'giant': luminosity_class = 'III'
if subclass.lower() == 'subgiant': luminosity_class = 'IV'
if subclass.lower() == 'supergiant': luminosity_class = 'I'
if subclass.lower() == 'delta': age_class = 'delta'
if subclass.lower() == 'vlg' or subclass.lower() == 'vl-g' or subclass.lower() == 'lowg' or subclass.lower() == 'low-g' or subclass.lower() == 'gamma': age_class = 'gamma'
if subclass.lower() == 'intg' or subclass.lower() == 'int-g' or subclass.lower() == 'beta': age_class = 'beta'
if uncertainty > 1.: error = ':'
if uncertainty > 2.: error = '::'
pstr = ''
if peculiar == True: pstr = 'p'
return '{}{}{}{:3.1f}{}{}{}{}'.format(color_class,metallicity_class,spletter[spind],spdec,age_class,luminosity_class,pstr,error)
# spectral type -> number
elif isinstance(inp,str):
# output = []
if (sys.version_info.major == 2):
inp = string.split(inp,sep='+/-')[0] # remove +/- sides
else:
inp = inp.split('+/-')[0] # remove +/- sides
inp = inp.replace('...','').replace(' ','')
sptype = re.findall('[{}]'.format(spletter),inp.upper())
outval = 0.
# specialty classes
if len(sptype) >= 1:
ytype = re.findall('[abcd]',inp.split('p')[-1])
if len(ytype) == 1: age_class = ytype[0]
if inp.find('pec') != -1:
peculiar = True
inp = inp.replace('pec','')
if inp.find('p') != -1:
peculiar = True
inp = inp.replace('p','')
if inp.find('alpha') != -1:
age_class = 'alpha'
inp = inp.replace('alpha','')
if inp.find('beta') != -1:
age_class = 'beta'
inp = inp.replace('beta','')
if inp.find('gamma') != -1:
age_class = 'gamma'
inp = inp.replace('gamma','')
if inp.find('delta') != -1:
age_class = 'delta'
inp = inp.replace('delta','')
if inp.find('esd') != -1:
subclass = 'esd'
inp = inp.replace('esd','')
elif inp.find('usd') != -1:
subclass = 'usd'
inp = inp.replace('usd','')
elif inp.find('d/sd') != -1:
subclass = 'd/sd'
inp = inp.replace('d/sd','')
elif inp.find('sd') != -1:
subclass = 'sd'
inp = inp.replace('sd','')
if inp.count('I') > 0:
luminosity_class = ''.join(re.findall('I',inp))
inp = inp.replace('I','')
if inp.count(':') > 0:
error = ''.join(re.findall(':',inp))
inp = inp.replace(':','')
if inp[0] == 'b' or inp[0] == 'r':
color_class = inp[0]
inp = inp.replace('b','')
inp = inp.replace('r','')
outval = spletter.find(sptype[0])*10.
spind = inp.find(sptype[0])+1
if spind < len(inp):
if inp.find('.') < 0:
if isNumber(inp[spind]):
outval = outval+float(inp[spind])
else:
try:
outval = outval+float(inp[spind:spind+3])
spind = spind+3
except:
if verbose: print('\nProblem converting input type {} to a numeric type'.format(inp))
outval = numpy.nan
return outval
else:
if verbose: print('\nOnly spectral classes {} are handled by typeToNum'.format(spletter))
return numpy.nan
# none of the above - return the input
else:
if verbose: print('\nWarning: could not recognize format of spectral type {}\n'.format(inp))
return inp
def UVW(coord,distance,mu,rv,e_distance = 0.,e_mu = [0.,0.],e_rv = 0.,nsamp=100,full=False,verbose=False):
'''
THIS FUNCTION NEEDS CLEANING
'''
try:
from uvwxyz.uvwxyz import uvw as uvwcalc
except:
raise ValueError('\nMust have installed package uvwxyz to run this module: https://github.com/dr-rodriguez/uvwxyz')
try:
c = properCoordinates(coord)
except:
raise ValueError('\nCoordinate input {} is in incorrect format'.format(coord))
if not isinstance(mu,list) and not isinstance(mu,numpy.ndarray):
raise ValueError('\nProper motion input {} must be a 2-element list'.format(mu))
if not isinstance(e_mu,list) and not isinstance(e_mu,numpy.ndarray):
raise ValueError('\nProper motion uncertainty input {} must be a 2-element list'.format(e_mu))
if e_distance==0 and e_mu[0]==0 and e_mu[1]==0 and e_rv==0:
return uvwcalc(c.ra.degree,c.dec.degree,distance,mu[0],mu[1],rv),numpy.nan
else:
if full==False:
us,vs,ws = uvwcalc(c.ra.degree,c.dec.degree,numpy.random.normal(distance,e_distance,nsamp),numpy.random.normal(mu[0],e_mu[0],nsamp),numpy.random.normal(mu[1],e_mu[1],nsamp),numpy.random.normal(rv,e_rv,nsamp))
return [numpy.median(us),numpy.std(us)],[numpy.median(vs),numpy.std(vs)],[numpy.median(ws),numpy.std(ws)]
else:
return uvwcalc(c.ra.degree,c.dec.degree,numpy.random.normal(distance,e_distance,nsamp),numpy.random.normal(mu[0],e_mu[0],nsamp),numpy.random.normal(mu[1],e_mu[1],nsamp),numpy.random.normal(rv,e_rv,nsamp))
def lbolToMbol(lbol,err=0.,scale='log',sun_scale=True,reverse=False):
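'''
:Purpose:
Converts a bolometric luminosity into a bolometric magnitude (or the reverse if reverse=True),
using the IAU 2015 nominal zero point L0 = 3.0128e28 W. With scale='log', luminosities are passed
and returned as log10(L/Lsun); with scale='linear', inputs may be astropy quantities or floats
(scaled by Lsun if sun_scale=True, otherwise taken in watts). Returns the converted value, or a
(value, uncertainty) tuple when err is nonzero.
'''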
l0 = 3.0128e28*u.W # in watts (IAU 2015 nominal zero point)
lsun = u.Lsun
# Lbol -> Mbol
if reverse==False:
lb = copy.deepcopy(lbol)
le = copy.deepcopy(err)
if scale=='linear':
if not isUnit(lb):
if sun_scale==True: lb=lb*lsun
else: lb=lb*(l0.unit)
lb = numpy.log10((lb/lsun).decompose())
if not isUnit(le):
if sun_scale==True: le=le*lsun
else: le=le*(l0.unit)
le = numpy.log10((le/lsun).decompose())
mout = -2.5*lb-2.5*numpy.log10((lsun/l0).decompose())
mout_e = 2.5*le
if err == 0.:
return mout
else:
return mout,mout_e
# Mbol -> Lbol
else:
mb = copy.deepcopy(lbol)
mbe = copy.deepcopy(err)
lout = l0*10.**(-0.4*mb)
lout_e = lout*0.4*numpy.log(10)*mbe
if scale=='linear':
if err == 0.:
return lout
else:
return lout,lout_e
else:
lout_e = ((lout_e/lout).decompose())/numpy.log(10.)
lout = numpy.log10((lout/lsun).decompose())
if err == 0.: return lout
else: return lout.value,lout_e.value
def xyz(coordinate,center='sun',r0=8000*u.pc,z0=25*u.pc,unit=u.pc,**kwargs):
'''
:Purpose:
A "fast" method for converting a coordinate to heliocentric or galactocentric XYZ (cartesian) galaxy coordinates.
This assumes a right handed orientation with X from Sun to Galactic center, Y from Sun to the direction of Galactic rotation, and Z from Sun toward Galactic North.
Note that the astropy SkyCoord method also provides a way of producing `XYZ equatorial coordinates <http://docs.astropy.org/en/stable/api/astropy.coordinates.CartesianRepresentation.html>`_
:Required Inputs:
:param coordinate: A coordinate or list of coordinate variables, something that can be converted to astropy SkyCoord by `splat.properCoordinates()`_
:Optional Inputs:
:param distance: If not included in the coordinate variable, the distance to the source in pc (default: None)
:param center = 'sun': centering of coordinates; by default this is the Sun, but for full galactocentric coordinates set to 'galactic'
:param r0 = 8000 pc: radial distance between Sun and Galactic center
:param z0 = 25 pc: vertical distance between Sun and Galactic plane
:param unit = astropy.units.pc: preferred unit
:Outputs:
A tuple (x,y,z), each of which is an array of x,y,z Galactic coordinates in preferred units
:Example:
>>> import splat
>>> c = splat.properCoordinates('J05591914-1404488',distance=10.2)
>>> splat.xyz(c)
(<Quantity -7.442377515807463 pc>, <Quantity -6.2399837133240235 pc>, <Quantity -3.116668119908577 pc>)
>>> splat.xyz(c,center='galactic')
(<Quantity 7992.5576224841925 pc>, <Quantity -6.2399837133240235 pc>, <Quantity 21.883331880091422 pc>)
.. _`splat.properCoordinates() <REF>`
'''
# check inputs
if not splat.isUnit(unit): unit = u.pc
if not isinstance(coordinate,list): c = [coordinate]
else: c = coordinate
if not isinstance(c[0],SkyCoord):
try:
c = [splat.properCoordinates(cd,**kwargs) for cd in c]
except:
raise ValueError('{} is not a proper coordinate'.format(coordinate))
if not isinstance(kwargs.get('distance',False),bool): distance=kwargs['distance']
elif str(c[0].distance.unit) != '': distance = [cd.distance for cd in c]
else:
raise ValueError('No distance value provided')
if isinstance(distance,numpy.ndarray): distance = list(distance)
if not isinstance(distance,list): distance = [distance]
if splat.isUnit(distance[0]): distance = [float(d.to(unit).value) for d in distance]
if splat.isUnit(r0): r0 = r0.to(unit).value
if splat.isUnit(z0): z0 = z0.to(unit).value
l = [cd.galactic.l.radian for cd in c]
b = [cd.galactic.b.radian for cd in c]
# make sure arrays are of the same length
while len(distance) < len(l): distance.append(distance[-1])
while len(l) < len(distance):
l.append(l[-1])
b.append(b[-1])
# compute xyz
distance = numpy.array(distance)
l = numpy.array(l)
b = numpy.array(b)
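# heliocentric Cartesian components: X toward the Galactic center, Y along Galactic rotation, Z toward the North Galactic Pole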
x = distance*numpy.cos(l)*numpy.cos(b)
y = distance*numpy.sin(l)*numpy.cos(b)
z = distance*numpy.sin(b)
if center.lower() == 'galactic':
x = x+r0
z = z+z0
if len(distance) == 1:
return x[0]*unit,y[0]*unit,z[0]*unit
else:
return x*unit,y*unit,z*unit
def baryVel(coord,obstime,location='keck',correction='barycenter'):
'''
:Purpose:
Computes the barycentric or heliocentric velocity in a direction and from a specific Earth location
:Required Inputs:
- :param coord: Coordinate of source; should be astropy.coordinates.SkyCoord, but other formats will be converted via splat.properCoordinates
- :param obstime: A date/time, preferred in astropy.time.Time format but can be converted from splat.properDate
:Optional Inputs:
- :param location: location on Earth, specified by astropy.coordinates.EarthLocation; string of location;
dictionary containing 'lat', 'lon', and 'height'; or array of [lat,lon,height] (default = 'keck')
- :param correction: type of correction, can be either 'barycentric' or 'heliocentric' (default = 'barycenter')
:Output:
The velocity correction in km/s
:Example:
>>> import splat
>>> coord = splat.properCoordinates('J15104786-2818174')
>>> print(splat.baryVel(coord,'2017-07-31',location='keck'))
-27.552554878923033 km / s
'''
# check coordinate
if not isinstance(coord,SkyCoord):
try:
c = properCoordinates(coord)
except:
raise ValueError('\nCould not convert coordinate input {} to a SkyCoord'.format(coord))
else: c = copy.deepcopy(coord)
# check time
if not isinstance(obstime,Time):
try:
t = Time(obstime)
except:
raise ValueError('\nCould not convert time input {} into a Time variable'.format(obstime))
else: t = copy.deepcopy(obstime)
# check location
if not isinstance(location,EarthLocation):
if isinstance(location,str):
loc = checkTelescope(location)
if loc != False:
l = EarthLocation.from_geodetic(lat=TELESCOPES[loc]['lat'], lon=TELESCOPES[loc]['lon'], height=TELESCOPES[loc]['height'])
else:
try:
l = EarthLocation.of_site(location)
except:
raise ValueError('\nCould not convert location input {} into an EarthLocation; may be offline'.format(location))
elif isinstance(location,list) or isinstance(location,float):
try:
if len(location) == 2:
if not isUnit(location[0]): location = [x*u.deg for x in location]
l = EarthLocation.from_geodetic(lat=location[0], lon=location[1])
elif len(location) == 3:
if not isUnit(location[0]):
location[0] = location[0]*u.deg
location[1] = location[1]*u.deg
location[2] = location[2]*u.m
l = EarthLocation.from_geodetic(lat=location[0], lon=location[1], height=location[2])
else:
raise ValueError('\nCould not convert location input {} into an EarthLocation'.format(location))
except:
raise ValueError('\nCould not convert location input {} into an EarthLocation'.format(location))
elif isinstance(location,dict):
try:
l = EarthLocation.from_geodetic(**location)
except:
raise ValueError('\nCould not convert location input {} into an EarthLocation'.format(location))
else:
raise ValueError('\nCould not convert location input {} into an EarthLocation'.format(location))
else: l = copy.deepcopy(location)
# flag if we're not online
auto_max_age = 14.*u.day
if checkOnline() == False: auto_max_age = None
# make correction
if 'bary' in correction.lower():
return c.radial_velocity_correction(obstime=t, location=l).to(u.km/u.s)
elif 'helio' in correction.lower():
return c.radial_velocity_correction('heliocentric',obstime=t, location=l).to(u.km/u.s)
else:
raise ValueError('\n Could not interpret preferred correction {} '.format(correction))
def lsfRotation(vsini,vsamp,epsilon=0.6):
'''
Purpose:
Generates a line spread function for rotational broadening, based on Gray (1992)
Ported over by Chris Theissen and Adam Burgasser from the IDL routine
`lsf_rotate <https://idlastro.gsfc.nasa.gov/ftp/pro/astro/lsf_rotate.pro>`_ written by W. Landsman
Required Inputs:
:param: **vsini**: vsini of rotation, assumed in units of km/s
:param: **vsamp**: sampling velocity, assumed in units of km/s. vsamp must be smaller than vsini or else a delta function is returned
Optional Inputs:
:param: **epsilon**: limb darkening parameter based on Gray (1992)
Output:
Line spread function kernel with length 2*vsini/vsamp (forced to be odd)
:Example:
>>> import splat
>>> kern = lsfRotation(30.,3.)
>>> print(kern)
array([ 0. , 0.29053574, 0.44558751, 0.55691445, 0.63343877,
0.67844111, 0.69330989, 0.67844111, 0.63343877, 0.55691445,
0.44558751, 0.29053574, 0. ])
'''
# limb darkening parameters
e1 = 2. * (1. - epsilon)
e2 = numpy.pi * epsilon/2.
e3 = numpy.pi * (1. - epsilon/3.)
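# Gray (1992) broadening kernel: G(x) = [2(1-eps)*sqrt(1-x^2) + (pi*eps/2)*(1-x^2)] / [pi*(1-eps/3)], with x = delta_v/vsini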
# vsini must be > vsamp - if not, return a delta function
if vsini <= vsamp:
print('\nWarning: velocity sampling {} is broader than vsini {}; returning delta function'.format(vsamp,vsini))
lsf = numpy.zeros(5)
lsf[2] = 1.
return lsf
# generate LSF
nsamp = int(numpy.ceil(2.*vsini/vsamp))
if nsamp % 2 == 0:
nsamp+=1
x = numpy.arange(nsamp)-(nsamp-1.)/2.
x = x*vsamp/vsini
x2 = numpy.absolute(1.-x**2)
return (e1*numpy.sqrt(x2) + e2*x2)/e3
#####################################################
############ STATISTICAL FUNCTIONS ##############
#####################################################
def distributionStats(x, q=[0.16,0.5,0.84], weights=None, sigma=None, **kwargs):
'''
:Purpose: Find key values along distributions based on quantile steps.
This code is derived almost entirely from triangle.py.
'''
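# For example (unweighted, default quantiles 0.16/0.5/0.84):
# distributionStats([1.,2.,3.,4.,5.]) -> approximately [1.64, 3.0, 4.36]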
# clean data of nans
xd = numpy.array(copy.deepcopy(x))
xd0 = copy.deepcopy(xd)
xd = xd[~numpy.isnan(xd)]
if q is None and sigma is None:
sigma = 1.
if sigma is not None:
q = [stats.norm.cdf(-sigma),0.5,stats.norm.cdf(sigma)]
if weights is None:
return numpy.percentile(xd, [100. * qi for qi in q])
else:
wt = numpy.array(copy.deepcopy(weights))
wt = wt[~numpy.isnan(xd0)]
idx = numpy.argsort(xd)
xsorted = xd[idx]
cdf = numpy.add.accumulate(wt[idx])
# print(xsorted,cdf,wt[idx],type(xd),type(cdf))
cdff = [float(c) for c in cdf]
cdfn = [c/cdff[-1] for c in cdff]
return numpy.interp(q, cdfn, xsorted).tolist()
def gauss(x,*p):
'''
Simple gaussian function for curve fit analysis
'''
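# p = (A, mu, sig, c): amplitude, center, width, constant offset
# illustrative use with scipy.optimize.curve_fit (the initial guess values below are arbitrary):
# popt, pcov = curve_fit(gauss, x, y, p0=[1., 0., 1., 0.])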
A,mu,sig,c = p
return c+A*numpy.exp(-(x-mu)**2/(2*sig**2))
def reMap(x1,y1,x2,nsamp=100,method='fast'):
'''
:Purpose:
Maps a function y(x) onto a new grid x'. If x' is higher resolution this is done through interpolation;
if x' is lower resolution, this is done by integrating over the relevant pixels
Required Inputs:
:param x1: x-axis values for original function
:param y1: y-axis values for original function
:param x2: x-axis values for output function
Optional Inputs:
:param nsamp: Number of samples for stepwise integration if going from high resolution to low resolution
Output:
y-axis values for resulting remapped function
:Example:
>>> # a coarse way of downsampling spectrum
>>> import splat, numpy
>>> sp = splat.Spectrum(file='high_resolution_spectrum.fits')
>>> w_low = numpy.linspace(numpy.min(sp.wave.value),numpy.max(sp.wave.value),len(sp.wave.value)/10.)
>>> f_low = splat.reMap(sp.wave.value,sp.flux.value,w_low)
>>> n_low = splat.reMap(sp.wave.value,sp.noise.value,w_low)
>>> sp.wave = w_low*sp.wave.unit
>>> sp.flux = f_low*sp.flux.unit
>>> sp.noise = n_low*sp.noise.unit
'''
# check inputs
if x2[0] < x1[0] or x2[-1] > x1[-1]:
raise ValueError('\nOutput x range {} to {} must be within input x range {} to {}'.format(x2[0],x2[-1],x1[0],x1[-1]))
# low resolution -> high resolution: interpolation
if len(x1) <= len(x2):
f = interp1d(x1,y1,bounds_error=False,fill_value=0.)
y2 = f(x2)
# high resolution -> low resolution: integrate
else:
# slow flux-preserving method
if method == 'splat':
xs = [numpy.max([x1[0],x2[0]-0.5*(x2[1]-x2[0])])]
for i in range(len(x2)-1): xs.append(x2[i]+0.5*(x2[i+1]-x2[i]))
xs.append(numpy.min([x2[-1]+0.5*(x2[-1]-x2[-2]),x1[-1]]))
f = interp1d(x1,y1)
# integral loop
y2 = []
for i in range(len(x2)):
dx = numpy.linspace(xs[i],xs[i+1],nsamp)
y2.append(trapz(f(dx),x=dx)/trapz(numpy.ones(nsamp),x=dx))
# fast method
elif method == 'fast':
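# divide out a degree-4 polynomial baseline, spline-interpolate the residual ratio onto the new grid,
# then restore the baseline; fast but only approximately flux-preserving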
baseline = numpy.polynomial.Polynomial.fit(x1, y1, 4)
ip = InterpolatedUnivariateSpline(x1, y1/baseline(x1), k=3)
y2 = baseline(x2)*ip(x2)
return y2
def integralResample_OLD(xh, yh, xl, nsamp=100):
'''
:Purpose: A 1D integral smoothing and resampling function that attempts to preserve total flux. Uses
scipy.interpolate.interp1d and scipy.integrate.trapz to perform piece-wise integration
Required Inputs:
:param xh: x-axis values for "high resolution" data
:param yh: y-axis values for "high resolution" data
:param xl: x-axis values for resulting "low resolution" data, must be contained within high resolution and have fewer values
Optional Inputs:
:param nsamp: Number of samples for stepwise integration
Output:
y-axis values for resulting "low resolution" data
:Example:
>>> # a coarse way of downsampling spectrum
>>> import splat, numpy
>>> sp = splat.Spectrum(file='high_resolution_spectrum.fits')
>>> w_low = numpy.linspace(numpy.min(sp.wave.value),numpy.max(sp.wave.value),len(sp.wave.value)/10.)
>>> f_low = splat.integralResample(sp.wave.value,sp.flux.value,w_low)
>>> n_low = splat.integralResample(sp.wave.value,sp.noise.value,w_low)
>>> sp.wave = w_low*sp.wave.unit
>>> sp.flux = f_low*sp.flux.unit
>>> sp.noise = n_low*sp.noise.unit
'''
# check inputs
if xl[0] < xh[0] or xl[-1] > xh[-1]: raise ValueError('\nLow resolution x range {} to {} must be within high resolution x range {} to {}'.format(xl[0],xl[-1],xh[0],xh[-1]))
if len(xl) > len(xh): raise ValueError('\nTarget x-axis must be lower resolution than original x-axis')
# set up samples
xs = [numpy.max([xh[0],xl[0]-0.5*(xl[1]-xl[0])])]
for i in range(len(xl)-1): xs.append(xl[i]+0.5*(xl[i+1]-xl[i]))
xs.append(numpy.min([xl[-1]+0.5*(xl[-1]-xl[-2]),xh[-1]]))
f = interp1d(xh,yh)
# integral loop
ys = []
for i in range(len(xl)):
dx = numpy.linspace(xs[i],xs[i+1],nsamp)
ys.append(trapz(f(dx),x=dx)/trapz(numpy.ones(nsamp),x=dx))
# plt.plot(xh,yh,color='k')
# plt.plot(xl,ys,color='r')
return ys
def integralResample(xh, yh, xl, nsamp=100,method='fast'):
'''
:Purpose: A 1D integral smoothing and resampling function that attempts to preserve total flux. Uses
scipy.interpolate.interp1d and scipy.integrate.trapz to perform piece-wise integration
Required Inputs:
:param xh: x-axis values for "high resolution" data
:param yh: y-axis values for "high resolution" data
:param xl: x-axis values for resulting "low resolution" data, must be contained within high resolution and have fewer values
Optional Inputs:
:param nsamp: Number of samples for stepwise integration
Output:
y-axis values for resulting "low resolution" data
:Example:
>>> # a coarse way of downsampling spectrum
>>> import splat, numpy
>>> sp = splat.Spectrum(file='high_resolution_spectrum.fits')
>>> w_low = numpy.linspace(numpy.min(sp.wave.value),numpy.max(sp.wave.value),len(sp.wave.value)/10.)
>>> f_low = splat.integralResample(sp.wave.value,sp.flux.value,w_low)
>>> n_low = splat.integralResample(sp.wave.value,sp.noise.value,w_low)
>>> sp.wave = w_low*sp.wave.unit
>>> sp.flux = f_low*sp.flux.unit
>>> sp.noise = n_low*sp.noise.unit
'''
# check inputs
if xl[0] < xh[0] or xl[-1] > xh[-1]: raise ValueError('\nLow resolution x range {} to {} must be within high resolution x range {} to {}'.format(xl[0],xl[-1],xh[0],xh[-1]))
if len(xl) > len(xh): raise ValueError('\nTarget x-axis must be lower resolution than original x-axis')
# set up samples
if method == 'splat':
xs = [numpy.max([xh[0],xl[0]-0.5*(xl[1]-xl[0])])]
for i in range(len(xl)-1): xs.append(xl[i]+0.5*(xl[i+1]-xl[i]))
xs.append(numpy.min([xl[-1]+0.5*(xl[-1]-xl[-2]),xh[-1]]))
f = interp1d(xh,yh)
# integral loop
ys = []
for i in range(len(xl)):
dx = numpy.linspace(xs[i],xs[i+1],nsamp)
ys.append(trapz(f(dx),x=dx)/trapz(numpy.ones(nsamp),x=dx))
# plt.plot(xh,yh,color='k')
# plt.plot(xl,ys,color='r')
elif method == 'fast':
# print(xh,yh)
baseline = numpy.polynomial.Polynomial.fit(xh, yh, 4)
ip = InterpolatedUnivariateSpline(xh, yh/baseline(xh), k=3)
ys = baseline(xl)*ip(xl)
return ys
def randomSphereAngles(num,longitude_range=[0,2*numpy.pi],latitude_range=[-0.5*numpy.pi,0.5*numpy.pi],exclude_longitude_range=[],exclude_latitude_range=[],degrees=False,**kwargs):
'''
:Purpose:
Draw a set of angles from a uniform spherical distribution, with areal inclusion and exclusion constraints.
Note that latitude range is assumed to run from -pi/2 to +pi/2
:Required Input:
:param num: number of points to draw
:Optional Input:
:param: longitude_range = [0,2pi]: range over longitude to draw values
:param: latitude_range = [-pi/2,+pi/2]: range over latitude to draw values
:param: exclude_longitude_range = []: range of longitudes to exclude values
:param: exclude_latitude_range = []: range of latitudes to exclude values
:param: degrees = False: by default, radians are assumed; set to True to convert to degrees (also checks if inclusion/exclusion ranges are in degrees)
:Output:
2 arrays of longitudes and latitudes drawn uniformly over select area
:Example:
>>> import splat
>>> splat.randomSphereAngles(10)
(array([ 2.52679013, 0.85193769, 5.98514797, 0.89943465, 5.36310536,
5.34344768, 0.01743906, 4.93856229, 0.06508084, 0.5517308 ]),
array([-0.53399501, 0.04208564, 0.03089855, -0.60445954, 0.55800151,
0.80119146, -0.19318715, 0.76230148, -0.5935969 , -0.65839849]))
>>> splat.randomSphereAngles(10,latitude_range=[-10,10],degrees=True)
(array([ 28.55709202, 297.34760719, 152.79525894, 71.08745583,
153.56948338, 80.68486463, 7.75479896, 100.8408509 ,
356.63091754, 66.16572906]),
array([ 0.6747939 , -1.00316889, -2.26239023, 9.27397372, -8.96797181,
7.34796163, -1.93175289, 3.07888912, 0.69826684, -5.08428339]))
'''
# check inputs - convert to radians if necessary
if degrees==True and numpy.max(numpy.absolute(longitude_range)) > 2.*numpy.pi:
longitude_range = [l*numpy.pi/180. for l in longitude_range]
if degrees==True and numpy.max(numpy.absolute(latitude_range)) > numpy.pi:
latitude_range = [l*numpy.pi/180. for l in latitude_range]
# longitude - uniformly distributed
longitude = numpy.random.uniform(0,1,num)*(longitude_range[1]-longitude_range[0])+longitude_range[0]
# latitude - distributed by P(phi) = 1/2 cos(phi) for -pi/2 < phi < pi/2
x = numpy.linspace(latitude_range[0],latitude_range[1],num)
cdf = 0.5*(numpy.sin(x)+1.)
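# inverse-transform sampling: CDF(phi) ~ (sin(phi)+1)/2, renormalized over the requested latitude range, then inverted by interpolation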
cdf = cdf-numpy.nanmin(cdf)
cdf = cdf/numpy.nanmax(cdf)
f = interp1d(cdf,x)
latitude = f(numpy.random.uniform(0,1,num))
# exclude ranges specified
if len(exclude_longitude_range) > 0:
if degrees==True and numpy.max(numpy.absolute(exclude_longitude_range)) > 2.*numpy.pi:
exclude_longitude_range = [l*numpy.pi/180. for l in exclude_longitude_range]
longex = longitude[longitude<numpy.nanmin(exclude_longitude_range)]
longex = numpy.concatenate((longex,longitude[longitude>numpy.nanmax(exclude_longitude_range)]))
while len(longex) < num:
longitude = numpy.random.uniform(0,1,num)*(longitude_range[1]-longitude_range[0])+longitude_range[0]
longex = numpy.concatenate((longex,longitude[longitude<numpy.nanmin(exclude_longitude_range)]))
longex = numpy.concatenate((longex,longitude[longitude>numpy.nanmax(exclude_longitude_range)]))
longitude = longex[:num]
if len(exclude_latitude_range) > 0:
if degrees==True and numpy.max(numpy.absolute(exclude_latitude_range)) > numpy.pi:
exclude_latitude_range = [l*numpy.pi/180. for l in exclude_latitude_range]
latex = latitude[latitude<numpy.nanmin(exclude_latitude_range)]
latex = numpy.concatenate((latex,latitude[latitude>numpy.nanmax(exclude_latitude_range)]))
while len(latex) < num:
x = numpy.linspace(latitude_range[0],latitude_range[1],num)
cdf = 0.5*(numpy.sin(x)+1.)
cdf = cdf-numpy.nanmin(cdf)
cdf = cdf/numpy.nanmax(cdf)
f = interp1d(cdf,x)
latitude = f(numpy.random.uniform(0,1,num))
latex = numpy.concatenate((latex,latitude[latitude<numpy.nanmin(exclude_latitude_range)]))
latex = numpy.concatenate((latex,latitude[latitude>numpy.nanmax(exclude_latitude_range)]))
latitude = latex[:num]
# outputs; convert to degrees if desired
if degrees==True:
latitude = latitude*180./numpy.pi
longitude = longitude*180./numpy.pi
return longitude, latitude
def weightedMeanVar(vals, winp, *args, **kwargs):
'''
:Purpose:
Computes weighted mean of an array of values through various methods. Returns weighted mean and weighted uncertainty.
:Required Inputs:
:param **vals**: array of values
:param **winp**: array of weights associated with ``vals``
:Optional Inputs:
:param **method**: type of weighting to be used. Options include:
- *default*: (default) ``winp`` is taken to be actual weighting values
- *uncertainty*: uncertainty weighting, where ``winp`` is the uncertainties of ``vals``
- *ftest*: ftest weighting, where ``winp`` is the chi squared values of ``vals``
:param **weight_minimum**: minimum possible weight value (default = 0.)
:param **dof**: effective degrees of freedom (default = len(vals) - 1)
.. note:: When using ``ftest`` method, extra ``dof`` value is required
:Output:
Weighted mean and uncertainty
:Example:
>>> import splat
>>> splat.weightedMeanVar([3.52, 5.88, 9.03], [0.65, 0.23, 0.19])
(5.0057009345794379, 4.3809422657000594)
>>> splat.weightedMeanVar([3.52, 5.88, 9.03], [1.24, 2.09, 2.29], method = 'uncertainty')
(5.0069199363443841, 4.3914329968409946)
'''
method = kwargs.get('method','')
minwt = kwargs.get('weight_minimum',0.)
dof = kwargs.get('dof',len(vals)-1)
if (numpy.nansum(winp) <= 0.):
weights = numpy.ones(len(vals))
if isinstance(winp,u.quantity.Quantity):
winput = winp.value
else:
winput = copy.deepcopy(winp)
# uncertainty weighting: input is uncertainties
if (method == 'uncertainty'):
weights = [w**(-2) for w in winput]
# ftest weighting: input is chisq values, extra dof value is required
elif (method == 'ftest'):
# fix issue of chi^2 = 0
minchi = numpy.nanmin(winput)
weights = numpy.array([stats.f.pdf(w/minchi,dof,dof) for w in winput])
# just use the input as the weights
else:
weights = [w for w in winput]
weights = numpy.array(weights)/numpy.nanmax(weights)
weights[numpy.where(weights < minwt)] = 0.
mn = numpy.nansum(vals*weights)/numpy.nansum(weights)
var = numpy.nansum(weights*(vals-mn)**2)/numpy.nansum(weights)
if (method == 'uncertainty'):
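# propagate the measurement uncertainties: add (1/N^2) * sum(sigma_i^2) to the scatter variance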
var+=numpy.nansum([w**2 for w in winput])/(len(winput)**2)
return mn,numpy.sqrt(var)
#####################################################
############### DATABASE HELPERS ################
#####################################################
def checkDBCoordinates(db,designation_keyword='DESIGNATION',ra_keyword='RA',dec_keyword='DEC',shortname_keyword='SHORTNAME'):
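'''
Fills in missing coordinate-related columns of a source database (assumed to be a pandas DataFrame
or similar keyed table): computes RA/DEC from the designation column, the designation from RA/DEC,
and the shortname from the designation, using the keyword arguments as column names.
'''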
# designation -> ra, dec
if designation_keyword in list(db.keys()):
if ra_keyword not in list(db.keys()) or dec_keyword not in list(db.keys()):
coord = [designationToCoordinate(d) for d in db[designation_keyword]]
db[ra_keyword] = [c.ra.deg for c in coord]
db[dec_keyword] = [c.dec.deg for c in coord]
# ra,dec -> designation
else:
if ra_keyword not in list(db.keys()) or dec_keyword not in list(db.keys()):
print('Warning: cannot populate designation column {} without RA column {} and DEC column {}'.format(designation_keyword,ra_keyword,dec_keyword))
else:
db[designation_keyword] = [coordinateToDesignation([db[ra_keyword].iloc[i],db[dec_keyword].iloc[i]]) for i in range(len(db))]
# designation -> shortname
if designation_keyword in list(db.keys()):
if shortname_keyword not in list(db.keys()):
db[shortname_keyword] = [designationToShortName(d) for d in db[designation_keyword]]
return db
#####################################################
################ CODE MANAGEMENT ################
#####################################################
#
# Note that all of these should have a checkAccess() flag
#
#####################################################
def codeStats():
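'''
Generates summary statistics for the SPLAT library: spectrum and source counts, citation lists
(written to the documentation _static folder), spectral type histograms, sky maps, and an
object-type pie chart (written to the documentation _images folder). Requires access privileges.
'''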
if checkAccess() == False:
raise ValueError('You do not have sufficient access to run this program\n')
# library statistics - # of total/public spectra, # of total/public sources, # of source papers for public data
sall = splat.searchLibrary()
print('Total number of spectra = {} of {} sources'.format(len(sall),len(numpy.unique(numpy.array(sall['SOURCE_KEY'])))))
s = splat.searchLibrary(public=True)
print('Total number of public spectra = {} of {} sources'.format(len(s),len(numpy.unique(numpy.array(s['SOURCE_KEY'])))))
# data citations
pubs = numpy.unique(numpy.array(sall['DATA_REFERENCE']))
print('Total number of citations for all spectra = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(s['DATA_REFERENCE']))
print('Total number of citations for public spectra = {}'.format(len(pubs)))
cites = []
cites_html = []
for p in pubs:
try:
cites_html.append('<li>{} [<a href="{}">NASA ADS</a>]'.format(splat.citations.longRef(str(p)),splat.citations.citeURL(str(p))))
cites.append('{}'.format(splat.citations.longRef(str(p))))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
cites.sort()
with open(SPLAT_PATH+DOCS_FOLDER+'_static/citation_list.txt', 'w') as f:
f.write('Data references in SPLAT:\n')
for c in cites:
f.write('{}\n'.format(c))
cites_html.sort()
with open(SPLAT_PATH+DOCS_FOLDER+'_static/citation_list.html', 'w') as f:
f.write('<ul>\n')
for c in cites_html:
f.write('\t{}\n'.format(c))
f.write('</ul>\n')
# source citations
pubs = numpy.unique(numpy.array(sall['DISCOVERY_REFERENCE'].replace(numpy.nan,'')))
print('Total number of citations for all sources = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['OPT_TYPE_REF'].replace(numpy.nan,'')))
print('Total number of citations for all optical spectral types = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['NIR_TYPE_REF'].replace(numpy.nan,'')))
print('Total number of citations for all NIR spectral types = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['LIT_TYPE_REF'].replace(numpy.nan,'')))
print('Total number of citations for all literature spectral types = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['GRAVITY_CLASS_OPTICAL_REF'].replace(numpy.nan,'')))
print('Total number of citations for all optical gravity types = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['GRAVITY_CLASS_NIR_REF'].replace(numpy.nan,'')))
print('Total number of citations for all NIR gravity types = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['CLUSTER_REF'].replace(numpy.nan,'')))
print('Total number of citations for all cluster associations = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['BINARY_REF'].replace(numpy.nan,'')))
print('Total number of citations for all binary associations = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['SBINARY_REF'].replace(numpy.nan,'')))
print('Total number of citations for all spectral binary associations = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['COMPANION_REF'].replace(numpy.nan,'')))
print('Total number of citations for all companion associations = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['SIMBAD_SPT_REF'].replace(numpy.nan,'')))
print('Total number of citations for all SIMBAD SpTs = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['PARALLEX_REF'].replace(numpy.nan,'')))
print('Total number of citations for all parallaxes = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['MU_REF'].replace(numpy.nan,'')))
print('Total number of citations for all proper motions = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['RV_REF'].replace(numpy.nan,'')))
print('Total number of citations for all RVs = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
pubs = numpy.unique(numpy.array(sall['VSINI_REF'].replace(numpy.nan,'')))
print('Total number of citations for all vsini values = {}'.format(len(pubs)))
for p in pubs:
try:
x = splat.citations.longRef(str(p))
except:
print('\tWarning: no bibtex information for citation {}'.format(p))
# histogram of spectral types - all spectra
sptrng = [16,40]
xticks = range(sptrng[0],sptrng[1])
labels = [splat.typeToNum(x) for x in range(sptrng[0],sptrng[1])]
for i in range(2):
if i == 0:
s1 = sall[sall['OBJECT_TYPE'] == 'VLM']
fname = 'all'
else:
s1 = s[s['OBJECT_TYPE'] == 'VLM']
fname = 'published'
spex_spts = []
opt_spts = []
nir_spts = []
spts = []
for i,x in enumerate(s1['SPEX_TYPE']):
spt = -99.
if splat.isNumber(splat.typeToNum(x)):
sspt = splat.typeToNum(x)
spex_spts.append(sspt)
spt = copy.deepcopy(sspt)
nspt = splat.typeToNum(s1['NIR_TYPE'].iloc[i])
if splat.isNumber(nspt):
nir_spts.append(nspt)
if nspt > 28.: spt = copy.deepcopy(nspt)
ospt = splat.typeToNum(s1['OPT_TYPE'].iloc[i])
if splat.isNumber(ospt):
opt_spts.append(ospt)
if ospt < 29.: spt = copy.deepcopy(ospt)
if spt > 0: spts.append(spt)
# SpeX type
sptarr = numpy.array(spex_spts)
plt.figure(figsize=(14,6))
n, bins, patches = plt.hist(sptarr[numpy.where(numpy.logical_and(sptarr >= sptrng[0],sptarr < 20))], bins=len(range(sptrng[0],sptrng[1])), log=True, range=sptrng, facecolor='green', alpha=0.75)
n, bins, patches = plt.hist(sptarr[numpy.where(numpy.logical_and(sptarr >= 20,sptarr < 30))], bins=len(range(sptrng[0],sptrng[1])), log=True, range=sptrng, facecolor='red', alpha=0.75)
n, bins, patches = plt.hist(sptarr[numpy.where(numpy.logical_and(sptarr >= 30,sptarr < sptrng[1]))], bins=len(range(sptrng[0],sptrng[1])), log=True, range=sptrng, facecolor='b', alpha=0.75)
plt.xticks(xticks,labels)
plt.xlabel('SpeX Spectral Type')
plt.ylabel('log10 Number')
plt.xlim([sptrng[0]-0.5,sptrng[1]+0.5])
plt.legend(['M dwarfs ({} sources)'.format(len(sptarr[numpy.where(numpy.logical_and(sptarr >= sptrng[0],sptarr < 20))])),'L dwarfs ({} sources)'.format(len(sptarr[numpy.where(numpy.logical_and(sptarr >= 20,sptarr < 30))])),'T dwarfs ({} sources)'.format(len(sptarr[numpy.where(numpy.logical_and(sptarr >= 30,sptarr < sptrng[1]))]))])
plt.savefig(SPLAT_PATH+DOCS_FOLDER+'_images/spt_spex_distribution_{}.png'.format(fname))
plt.clf()
# Optical type
sptarr = numpy.array(opt_spts)
plt.figure(figsize=(14,6))
n, bins, patches = plt.hist(sptarr[numpy.where(numpy.logical_and(sptarr >= sptrng[0],sptarr < 20))], bins=len(range(sptrng[0],sptrng[1])), log=True, range=sptrng, facecolor='green', alpha=0.75)
n, bins, patches = plt.hist(sptarr[numpy.where(numpy.logical_and(sptarr >= 20,sptarr < 30))], bins=len(range(sptrng[0],sptrng[1])), log=True, range=sptrng, facecolor='red', alpha=0.75)
n, bins, patches = plt.hist(sptarr[numpy.where(numpy.logical_and(sptarr >= 30,sptarr < sptrng[1]))], bins=len(range(sptrng[0],sptrng[1])), log=True, range=sptrng, facecolor='b', alpha=0.75)
plt.xticks(xticks,labels)
plt.xlabel('Optical Spectral Type')
plt.ylabel('log10 Number')
plt.xlim([sptrng[0]-0.5,sptrng[1]+0.5])
plt.legend(['M dwarfs ({} sources)'.format(len(sptarr[numpy.where(numpy.logical_and(sptarr >= sptrng[0],sptarr < 20))])),'L dwarfs ({} sources)'.format(len(sptarr[numpy.where(numpy.logical_and(sptarr >= 20,sptarr < 30))])),'T dwarfs ({} sources)'.format(len(sptarr[numpy.where(numpy.logical_and(sptarr >= 30,sptarr < sptrng[1]))]))])
plt.savefig(SPLAT_PATH+DOCS_FOLDER+'_images/spt_optical_distribution_{}.png'.format(fname))
plt.clf()
# NIR type
sptarr = numpy.array(nir_spts)
plt.figure(figsize=(14,6))
n, bins, patches = plt.hist(sptarr[numpy.where(numpy.logical_and(sptarr >= sptrng[0],sptarr < 20))], bins=len(range(sptrng[0],sptrng[1])), log=True, range=sptrng, facecolor='green', alpha=0.75)
n, bins, patches = plt.hist(sptarr[numpy.where(numpy.logical_and(sptarr >= 20,sptarr < 30))], bins=len(range(sptrng[0],sptrng[1])), log=True, range=sptrng, facecolor='red', alpha=0.75)
n, bins, patches = plt.hist(sptarr[numpy.where(numpy.logical_and(sptarr >= 30,sptarr < sptrng[1]))], bins=len(range(sptrng[0],sptrng[1])), log=True, range=sptrng, facecolor='b', alpha=0.75)
plt.xticks(xticks,labels)
plt.xlabel('NIR Spectral Type')
plt.ylabel('log10 Number')
plt.xlim([sptrng[0]-0.5,sptrng[1]+0.5])
plt.legend(['M dwarfs ({} sources)'.format(len(sptarr[numpy.where(numpy.logical_and(sptarr >= sptrng[0],sptarr < 20))])),'L dwarfs ({} sources)'.format(len(sptarr[numpy.where(numpy.logical_and(sptarr >= 20,sptarr < 30))])),'T dwarfs ({} sources)'.format(len(sptarr[numpy.where(numpy.logical_and(sptarr >= 30,sptarr < sptrng[1]))]))])
plt.savefig(SPLAT_PATH+DOCS_FOLDER+'_images/spt_nir_distribution_{}.png'.format(fname))
plt.clf()
# Adopted type
sptarr = numpy.array(spts)
plt.figure(figsize=(14,6))
n, bins, patches = plt.hist(sptarr[numpy.where(numpy.logical_and(sptarr >= sptrng[0],sptarr < 20))], bins=len(range(sptrng[0],sptrng[1])), log=True, range=sptrng, facecolor='green', alpha=0.75)
n, bins, patches = plt.hist(sptarr[numpy.where(numpy.logical_and(sptarr >= 20,sptarr < 30))], bins=len(range(sptrng[0],sptrng[1])), log=True, range=sptrng, facecolor='red', alpha=0.75)
n, bins, patches = plt.hist(sptarr[numpy.where(numpy.logical_and(sptarr >= 30,sptarr < sptrng[1]))], bins=len(range(sptrng[0],sptrng[1])), log=True, range=sptrng, facecolor='b', alpha=0.75)
plt.xticks(xticks,labels)
plt.xlabel('Adopted Spectral Type')
plt.ylabel('log10 Number')
plt.xlim([sptrng[0]-0.5,sptrng[1]+0.5])
plt.legend(['M dwarfs ({} sources)'.format(len(sptarr[numpy.where(numpy.logical_and(sptarr >= sptrng[0],sptarr < 20))])),'L dwarfs ({} sources)'.format(len(sptarr[numpy.where(numpy.logical_and(sptarr >= 20,sptarr < 30))])),'T dwarfs ({} sources)'.format(len(sptarr[numpy.where(numpy.logical_and(sptarr >= 30,sptarr < sptrng[1]))]))])
plt.savefig(SPLAT_PATH+DOCS_FOLDER+'_images/spt_adopted_distribution_{}.png'.format(fname))
plt.clf()
# histogram of S/N
# map sources on sky
raref = Angle(numpy.linspace(0,359.,360)*u.degree)
raref.wrap_at(180.*u.degree)
ra = Angle(list(sall['RA'])*u.degree)
ra = ra.wrap_at(180*u.degree)
dec = Angle(list(sall['DEC'])*u.degree)
rap = Angle(list(s['RA'])*u.degree)
rap = rap.wrap_at(180*u.degree)
decp = Angle(list(s['DEC'])*u.degree)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111, projection="mollweide")
p1 = ax.scatter(ra.radian, dec.radian,color='r',alpha=0.5,s=10)
p2 = ax.scatter(rap.radian, decp.radian,color='k',alpha=0.5, s=5)
# ur = ax.plot(raref.radian,Angle([67.]*len(raref)*u.degree).radian,'k--')
# ur = ax.plot(raref.radian,Angle([-50.]*len(raref)*u.degree).radian,'k--')
ax.set_xticklabels(['14h','16h','18h','20h','22h','0h','2h','4h','6h','8h','10h'])
ax.grid(True)
# ef = matplotlib.patheffects.withStroke(foreground="w", linewidth=3)
# axis = ax.axis['lat=0']
# axis.major_ticklabels.set_path_effects([ef])
# axis.label.set_path_effects([ef])
plt.legend([p1,p2],['All Sources ({})'.format(len(sall)),'Published Sources ({})'.format(len(s))],bbox_to_anchor=(1, 1),bbox_transform=plt.gcf().transFigure)
fig.savefig(SPLAT_PATH+DOCS_FOLDER+'_images/map_all.png')
fig.clf()
# map sources on based on spectral type
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111, projection="mollweide")
sm = splat.searchLibrary(spt_range=[10,19.9],spt_type='SPEX')
sm = sm[sm['OBJECT_TYPE'] == 'VLM']
sl = splat.searchLibrary(spt_range=[20,29.9],spt_type='SPEX')
sl = sl[sl['OBJECT_TYPE'] == 'VLM']
st = splat.searchLibrary(spt_range=[30,39.9],spt_type='SPEX')
st = st[st['OBJECT_TYPE'] == 'VLM']
ra = Angle(list(sm['RA'])*u.degree)
ra = ra.wrap_at(180*u.degree)
dec = Angle(list(sm['DEC'])*u.degree)
p1 = ax.scatter(ra.radian, dec.radian,color='k',alpha=0.5,s=10)
ra = Angle(list(sl['RA'])*u.degree)
ra = ra.wrap_at(180*u.degree)
dec = Angle(list(sl['DEC'])*u.degree)
p2 = ax.scatter(ra.radian, dec.radian,color='r',alpha=0.5,s=10)
ra = Angle(list(st['RA'])*u.degree)
ra = ra.wrap_at(180*u.degree)
dec = Angle(list(st['DEC'])*u.degree)
p3 = ax.scatter(ra.radian, dec.radian,color='b',alpha=0.5,s=10)
plt.legend([p1,p2,p3],['M dwarfs ({})'.format(len(sm)),'L dwarfs ({})'.format(len(sl)),'T dwarfs ({})'.format(len(st))],bbox_to_anchor=(1, 1),bbox_transform=plt.gcf().transFigure)
ax.set_xticklabels(['14h','16h','18h','20h','22h','0h','2h','4h','6h','8h','10h'])
ax.grid(True)
fig.savefig(SPLAT_PATH+DOCS_FOLDER+'_images/map_byspt.png')
fig.clf()
# map sources on based on young or field
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111, projection="mollweide")
sy = splat.searchLibrary(young=True)
sy = sy[sy['OBJECT_TYPE'] == 'VLM']
so = splat.searchLibrary()
so = so[so['OBJECT_TYPE'] == 'VLM']
ra = Angle(list(so['RA'])*u.degree)
# ra = ra.wrap_at(180*u.degree)
# dec = Angle(so['DEC'].filled(numpy.nan)*u.degree)
# p1 = ax.scatter(ra.radian, dec.radian,color='k',alpha=0.1,s=5)
ra = Angle(list(sy['RA'])*u.degree)
ra = ra.wrap_at(180*u.degree)
dec = Angle(list(sy['DEC'])*u.degree)
p1 = ax.scatter(ra.radian, dec.radian,color='r',alpha=0.5,s=10)
ax.set_xticklabels(['14h','16h','18h','20h','22h','0h','2h','4h','6h','8h','10h'])
ax.grid(True)
plt.legend([p1],['Young Sources ({})'.format(len(sy))],bbox_to_anchor=(1, 1),bbox_transform=plt.gcf().transFigure)
fig.savefig(SPLAT_PATH+DOCS_FOLDER+'_images/map_young.png')
fig.clf()
# pie chart of spectrum types
ot = numpy.unique(numpy.array(sall['OBJECT_TYPE']))
otypes = 'STAR','GIANT','WD','GALAXY','OTHER'
sizes = [len(sall[sall['OBJECT_TYPE']==o]) for o in otypes]
explode = (0.1,0,0,0,0)
fig, ax = plt.subplots()
ax.pie(sizes, explode=explode, labels=otypes, autopct='%1.1f%%',
shadow=True, startangle=90, pctdistance = 0.7)
ax.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
fig.savefig(SPLAT_PATH+DOCS_FOLDER+'_images/object_othertypes.png')
def about():
'''
Gives basic information about SPLAT code
'''
print('\nSPLAT (SpeX Prism Library and Analysis Toolkit)')
print('\nSPLAT was created by members of the Cool Star Lab:')
for a in splat.AUTHORS: print('\t'+a)
print('\nFunding for SPLAT was provided by the National Aeronautics and Space Administration under grant NNX15AI75G')
print('\nSPLAT can be downloaded at '+splat.GITHUB_URL)
print('Documentation can be found at '+splat.DOCUMENTATION_URL)
print('\nIf you use SPLAT, please cite the software paper '+splat.citations.shortRef(splat.BIBCODE))
print('\nIf you use any of the data or models in SPLAT, you must cite the original references for these')
return
| mit |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/networkx/readwrite/tests/test_gml.py | 35 | 3099 | #!/usr/bin/env python
import io
from nose.tools import *
from nose import SkipTest
import networkx
class TestGraph(object):
@classmethod
def setupClass(cls):
global pyparsing
try:
import pyparsing
except ImportError:
try:
import matplotlib.pyparsing as pyparsing
except:
raise SkipTest('gml test: pyparsing not available.')
def setUp(self):
self.simple_data="""Creator me
graph [
comment "This is a sample graph"
directed 1
IsPlanar 1
pos [ x 0 y 1 ]
node [
id 1
label "Node 1"
pos [ x 1 y 1 ]
]
node [
id 2
pos [ x 1 y 2 ]
label "Node 2"
]
node [
id 3
label "Node 3"
pos [ x 1 y 3 ]
]
edge [
source 1
target 2
label "Edge from node 1 to node 2"
color [line "blue" thickness 3]
]
edge [
source 2
target 3
label "Edge from node 2 to node 3"
]
edge [
source 3
target 1 label
"Edge from node 3 to node 1"
]
]
"""
def test_parse_gml(self):
G=networkx.parse_gml(self.simple_data,relabel=True)
assert_equals(sorted(G.nodes()),\
['Node 1', 'Node 2', 'Node 3'])
assert_equals( [e for e in sorted(G.edges())],\
[('Node 1', 'Node 2'),
('Node 2', 'Node 3'),
('Node 3', 'Node 1')])
assert_equals( [e for e in sorted(G.edges(data=True))],\
[('Node 1', 'Node 2',
{'color': {'line': 'blue', 'thickness': 3},
'label': 'Edge from node 1 to node 2'}),
('Node 2', 'Node 3',
{'label': 'Edge from node 2 to node 3'}),
('Node 3', 'Node 1',
{'label': 'Edge from node 3 to node 1'})])
def test_read_gml(self):
import os,tempfile
(fd,fname)=tempfile.mkstemp()
fh=open(fname,'w')
fh.write(self.simple_data)
fh.close()
Gin=networkx.read_gml(fname,relabel=True)
G=networkx.parse_gml(self.simple_data,relabel=True)
assert_equals( sorted(G.nodes(data=True)), sorted(Gin.nodes(data=True)))
assert_equals( sorted(G.edges(data=True)), sorted(Gin.edges(data=True)))
os.close(fd)
os.unlink(fname)
def test_relabel_duplicate(self):
data="""
graph
[
label ""
directed 1
node
[
id 0
label "same"
]
node
[
id 1
label "same"
]
]
"""
fh = io.BytesIO(data.encode('UTF-8'))
fh.seek(0)
assert_raises(networkx.NetworkXError,networkx.read_gml,fh,relabel=True)
def test_bool(self):
G=networkx.Graph()
G.add_node(1,on=True)
G.add_edge(1,2,on=False)
data = '\n'.join(list(networkx.generate_gml(G)))
answer ="""graph [
node [
id 0
label 1
on 1
]
node [
id 1
label 2
]
edge [
source 0
target 1
on 0
]
]"""
assert_equal(data,answer)
| agpl-3.0 |
raghavrv/scikit-learn | sklearn/decomposition/truncated_svd.py | 13 | 8301 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). Contrary to PCA, this
estimator does not center the data before computing the singular value
decomposition. This means it can work with scipy.sparse matrices
efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional (default 5)
Number of iterations for randomized SVD solver. Not used by ARPACK.
The default is larger than the default in `randomized_svd` to handle
sparse matrices that may have large slowly decaying spectrum.
random_state : int, RandomState instance or None, optional, default = None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ : array, shape (n_components,)
The variance of the training samples transformed by a projection to
each component.
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
singular_values_ : array, shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=7,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.0606... 0.0584... 0.0497... 0.0434... 0.0372...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.249...
>>> print(svd.singular_values_) # doctest: +ELLIPSIS
[ 2.5841... 2.5245... 2.3201... 2.1753... 2.0443...]
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
random_state = check_random_state(self.random_state)
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = U * Sigma
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
self.singular_values_ = Sigma # Store the singular values.
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
| bsd-3-clause |
Edouard360/text-mining-challenge | classifier.py | 1 | 3124 | import xgboost as xgb
from matplotlib import pyplot
from sklearn.base import BaseEstimator
class Classifier(BaseEstimator):
def __init__(self):
self.name = "XGBClassifier"
self.n_estimators = 100
self.max_depth = 10
self.clf = xgb.XGBClassifier(n_estimators=self.n_estimators,
max_depth=self.max_depth,
learning_rate=0.1,
silent=1,
objective='binary:logistic',
nthread=-1,
gamma=0.001,
min_child_weight=1,
max_delta_step=0,
subsample=1,
colsample_bytree=1,
colsample_bylevel=1,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
base_score=0.508,
seed=0,
missing=None)
def plotlearningcurves(self, eval_set):
self.clf.fit(eval_set[0][0], eval_set[0][1], eval_metric=["logloss", "error"],
eval_set=eval_set, verbose=False)
results = self.clf.evals_result()
epochs = len(results['validation_0']['error'])
x_axis = range(0, epochs)
# plot log loss
fig, ax = pyplot.subplots()
ax.plot(x_axis, results['validation_0']['logloss'], label='Train')
ax.plot(x_axis, results['validation_1']['logloss'], label='Test')
ax.legend()
pyplot.ylabel('Log Loss')
pyplot.title('XGBoost Log Loss')
# pyplot.show()
pyplot.savefig("report/figures/logloss_learning_curve")
# plot classification error
fig, ax = pyplot.subplots()
ax.plot(x_axis, results['validation_0']['error'], label='Train')
ax.plot(x_axis, results['validation_1']['error'], label='Test')
ax.legend()
pyplot.ylabel('Classification Error')
pyplot.title('XGBoost Classification Error')
# pyplot.show()
pyplot.savefig("report/figures/classification_error_learning_curve")
# # plot f1 score
# figures, ax = pyplot.subplots()
# ax.plot(x_axis, results['validation_0']['f1'], label='Train')
# ax.plot(x_axis, results['validation_1']['f1'], label='Test')
# ax.legend()
# pyplot.ylabel('F1 score')
# pyplot.title('XGBoost F1Error')
# pyplot.show()
def early_stop(self, eval_set):
self.clf.fit(eval_set[0][0], eval_set[0][1],
early_stopping_rounds=10, eval_metric="logloss",
eval_set=eval_set, verbose=True)
def fit(self, X, y):
self.clf.fit(X, y)
def predict(self, X):
return self.clf.predict(X)
def predict_proba(self, X):
return self.clf.predict_proba(X)
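# Illustrative usage (X_train, y_train, X_test are placeholder arrays, not defined here):
# clf = Classifier()
# clf.fit(X_train, y_train)
# proba = clf.predict_proba(X_test)[:, 1]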
| apache-2.0 |
heiko-r/paparazzi | sw/tools/tcp_aircraft_server/phoenix/__init__.py | 86 | 4470 | #Copyright 2014, Antoine Drouin
"""
Phoenix is a Python library for interacting with Paparazzi
"""
import math
"""
Unit convertions
"""
def rad_of_deg(d): return d/180.*math.pi
def deg_of_rad(r): return r*180./math.pi
def rps_of_rpm(r): return r*2.*math.pi/60.
def rpm_of_rps(r): return r/2./math.pi*60.
def m_of_inch(i): return i*0.0254
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
my_title_spec = {'color' : 'k', 'fontsize' : 20 }
def save_if(filename):
if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
if fig == None:
fig = plt.figure(figsize=figsize)
# else:
# plt.figure(fig.number)
if margins:
left, bottom, right, top, wspace, hspace = margins
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
hspace=hspace, wspace=wspace)
if window_title:
fig.canvas.set_window_title(window_title)
return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
if xlab:
ax.xaxis.set_label_text(xlab)
if ylab:
ax.yaxis.set_label_text(ylab)
if title:
ax.set_title(title, my_title_spec)
if legend != None:
ax.legend(legend, loc='best')
if xlim != None:
ax.set_xlim(xlim[0], xlim[1])
if ylim != None:
ax.set_ylim(ylim[0], ylim[1])
"""
Messages
"""
#: dictionary mapping the C type to its length in bytes (e.g char -> 1)
TYPE_TO_LENGTH_MAP = {
"char" : 1,
"uint8" : 1,
"int8" : 1,
"uint16" : 2,
"int16" : 2,
"uint32" : 4,
"int32" : 4,
"float" : 4,
"double" : 8,
}
#: dictionary mapping the C type to correct format string
TYPE_TO_PRINT_MAP = {
float : "%f",
str : "%s",
chr : "%c",
int : "%d"
}
ACID_ALL = 0xFF
ACID_TEST = 0xFE
ACID_GROUNDSTATION = 0xFD
#: dictionary mapping debug types to format characters
DEBUG_MESSAGES = {
"DEBUG_UINT8" : "%d",
"DEBUG_INT32" : "%d",
"DEBUG_FLOAT" : "%#f"
}
"""
Binary logs
See format description in sw/airborne/subsystems/datalink/fms_link.c
"""
import struct
def hex_of_bin(b): return ' '.join( [ "%02X" % ord( x ) for x in b ] )
import pdb
def read_binary_log(filename, tick_freq = 2*512.):
f = open(filename, "rb")
d = f.read()
packet_header_len = 6
msg_header_len = 2
def read_packet(d, packet_start):
payload_start = packet_start+packet_header_len
timestamp, payload_len = struct.unpack("IH", d[packet_start:payload_start])
msgs = read_packet_payload(d, payload_start, payload_len)
next_packet = payload_start+payload_len+2
return timestamp, msgs, next_packet
def read_packet_payload(d, s, l):
msgs = []
packet_end = s+l; msg_start = s
while msg_start<packet_end:
payload_start = msg_start+msg_header_len
msg_len, msg_id = struct.unpack("BB", d[msg_start:payload_start])
payload_end = payload_start+msg_len
msg_payload = d[payload_start:payload_end]
msgs.append([msg_id, msg_payload])
#print msg_id, msg_len, hex_of_bin(msg_payload)
msg_start = payload_end
return msgs
packets = []
packet_start=0
while packet_start<len(d):
timestamp, msgs, next_packet = read_packet(d, packet_start)
packets.append([timestamp/tick_freq, msgs])
#print timestamp, msgs
packet_start = next_packet
f.close()
return packets
def extract_from_binary_log(protocol, packets, msg_names, t_min=None, t_max=None):
ret = [{'time':[], 'data':[]} for m in msg_names]
    if t_min is None: t_min = packets[0][0]
    if t_max is None: t_max = packets[-1][0]
for t, msgs in packets:
if t>= t_min and t<= t_max:
for id, payload in msgs:
m = protocol.get_message_by_id('telemetry', id)
                try: i = msg_names.index(m.name)
                except ValueError: continue
                ret[i]['time'].append(t); ret[i]['data'].append(m.unpack_scaled_values(payload))
return ret
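# Hedged usage sketch for the two helpers above (the 'protocol' object providing
# get_message_by_id() comes from elsewhere in Paparazzi, and the message names
# below are only placeholders):
#   packets = read_binary_log("flight.bin")
#   series = extract_from_binary_log(protocol, packets, ["SOME_MSG", "OTHER_MSG"])
#   # series[0]['time'] and series[0]['data'] then hold the timestamps and the
#   # unpacked, scaled values of the first requested message type.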
| gpl-2.0 |
wxs/keras | examples/addition_rnn.py | 50 | 5900 | # -*- coding: utf-8 -*-
from __future__ import print_function
from keras.models import Sequential, slice_X
from keras.layers.core import Activation, Dense, RepeatVector
from keras.layers import recurrent
from sklearn.utils import shuffle
import numpy as np
"""
An implementation of sequence to sequence learning for performing addition
Input: "535+61"
Output: "596"
Padding is handled by using a repeated sentinel character (space)
By default, the JZS1 recurrent neural network is used
JZS1 was an "evolved" recurrent neural network performing well on arithmetic benchmark in:
"An Empirical Exploration of Recurrent Network Architectures"
http://jmlr.org/proceedings/papers/v37/jozefowicz15.pdf
Input may optionally be inverted, shown to increase performance in many tasks in:
"Learning to Execute"
http://arxiv.org/abs/1410.4615
and
"Sequence to Sequence Learning with Neural Networks"
http://papers.nips.cc/paper/5346-sequence-to-sequence-learning-with-neural-networks.pdf
Theoretically it introduces shorter term dependencies between source and target.
Two digits inverted:
+ One layer JZS1 (128 HN), 5k training examples = 99% train/test accuracy in 55 epochs
Three digits inverted:
+ One layer JZS1 (128 HN), 50k training examples = 99% train/test accuracy in 100 epochs
Four digits inverted:
+ One layer JZS1 (128 HN), 400k training examples = 99% train/test accuracy in 20 epochs
Five digits inverted:
+ One layer JZS1 (128 HN), 550k training examples = 99% train/test accuracy in 30 epochs
"""
class CharacterTable(object):
"""
Given a set of characters:
+ Encode them to a one hot integer representation
+ Decode the one hot integer representation to their character output
    + Decode a vector of probabilities to their character output
"""
def __init__(self, chars, maxlen):
self.chars = sorted(set(chars))
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
self.maxlen = maxlen
def encode(self, C, maxlen=None):
maxlen = maxlen if maxlen else self.maxlen
X = np.zeros((maxlen, len(self.chars)))
for i, c in enumerate(C):
X[i, self.char_indices[c]] = 1
return X
def decode(self, X, calc_argmax=True):
if calc_argmax:
X = X.argmax(axis=-1)
return ''.join(self.indices_char[x] for x in X)
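# Illustrative round trip through CharacterTable (with the '0123456789+ '
# vocabulary defined below, i.e. 12 distinct characters):
#   ctable = CharacterTable('0123456789+ ', 7)
#   onehot = ctable.encode('12+345 ')   # one-hot array of shape (7, 12)
#   ctable.decode(onehot)               # -> '12+345 '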
class colors:
ok = '\033[92m'
fail = '\033[91m'
close = '\033[0m'
# Parameters for the model and dataset
TRAINING_SIZE = 50000
DIGITS = 3
INVERT = True
# Try replacing JZS1 with LSTM, GRU, or SimpleRNN
RNN = recurrent.JZS1
HIDDEN_SIZE = 128
BATCH_SIZE = 128
LAYERS = 1
MAXLEN = DIGITS + 1 + DIGITS
chars = '0123456789+ '
ctable = CharacterTable(chars, MAXLEN)
questions = []
expected = []
seen = set()
print('Generating data...')
while len(questions) < TRAINING_SIZE:
f = lambda: int(''.join(np.random.choice(list('0123456789')) for i in xrange(np.random.randint(1, DIGITS + 1))))
a, b = f(), f()
# Skip any addition questions we've already seen
# Also skip any such that X+Y == Y+X (hence the sorting)
key = tuple(sorted((a, b)))
if key in seen:
continue
seen.add(key)
# Pad the data with spaces such that it is always MAXLEN
q = '{}+{}'.format(a, b)
query = q + ' ' * (MAXLEN - len(q))
ans = str(a + b)
# Answers can be of maximum size DIGITS + 1
ans += ' ' * (DIGITS + 1 - len(ans))
if INVERT:
query = query[::-1]
questions.append(query)
expected.append(ans)
print('Total addition questions:', len(questions))
print('Vectorization...')
X = np.zeros((len(questions), MAXLEN, len(chars)), dtype=np.bool)
y = np.zeros((len(questions), DIGITS + 1, len(chars)), dtype=np.bool)
for i, sentence in enumerate(questions):
X[i] = ctable.encode(sentence, maxlen=MAXLEN)
for i, sentence in enumerate(expected):
y[i] = ctable.encode(sentence, maxlen=DIGITS + 1)
# Shuffle (X, y) in unison as the later parts of X will almost all be larger digits
X, y = shuffle(X, y)
# Explicitly set apart 10% for validation data that we never train over
split_at = len(X) - len(X) / 10
(X_train, X_val) = (slice_X(X, 0, split_at), slice_X(X, split_at))
(y_train, y_val) = (y[:split_at], y[split_at:])
print('Build model...')
model = Sequential()
# "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE
model.add(RNN(len(chars), HIDDEN_SIZE))
# For the decoder's input, we repeat the encoded input for each time step
model.add(RepeatVector(DIGITS + 1))
# The decoder RNN could be multiple layers stacked or a single layer
for _ in xrange(LAYERS):
model.add(RNN(HIDDEN_SIZE, HIDDEN_SIZE, return_sequences=True))
# For each of step of the output sequence, decide which character should be chosen
model.add(Dense(HIDDEN_SIZE, len(chars)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# Train the model each generation and show predictions against the validation dataset
for iteration in range(1, 200):
print()
print('-' * 50)
print('Iteration', iteration)
model.fit(X, y, batch_size=BATCH_SIZE, nb_epoch=1, validation_data=(X_val, y_val), show_accuracy=True)
###
# Select 10 samples from the validation set at random so we can visualize errors
for i in xrange(10):
ind = np.random.randint(0, len(X_val))
rowX, rowy = X_val[np.array([ind])], y_val[np.array([ind])]
preds = model.predict_classes(rowX, verbose=0)
q = ctable.decode(rowX[0])
correct = ctable.decode(rowy[0])
guess = ctable.decode(preds[0], calc_argmax=False)
print('Q', q[::-1] if INVERT else q)
print('T', correct)
print(colors.ok + '☑' + colors.close if correct == guess else colors.fail + '☒' + colors.close, guess)
print('---')
| mit |
LTLMoP/LTLMoP | doc/conf.py | 7 | 7828 | # -*- coding: utf-8 -*-
#
# LTLMoP documentation build configuration file, created by
# sphinx-quickstart on Sat Jan 30 19:27:28 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../src'))
sys.path.append(os.path.abspath('../src/lib'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'LTLMoP'
copyright = u'2006-2014, Cameron Finucane, Gangyuan (Jim) Jing, Hadas Kress-Gazit, et al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'LTLMoPdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'LTLMoP.tex', u'LTLMoP Documentation',
u'Cameron Finucane, Gangyuan (Jim) Jing, Hadas Kress-Gazit', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
##########################
#### below is dependency workaround code from http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
import sys
class Mock(object):
__all__ = []
def __init__(self, *args, **kwargs):
self.eps = 0 # hack for numpy
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
MOCK_MODULES = ['wx', 'wx.richtext', 'wx.stc', 'wx.grid', 'wx.lib',
'wx.lib.buttons', 'wx.lib.intctrl', 'wxversion', 'Polygon', 'Polygon.Shapes',
'Polygon.IO', 'Polygon.Utils', 'playerc', 'numpy', 'numpy.ma', 'numpy.matlib',
'scipy', 'scipy.linalg', 'scipy.optimize', 'ompl', 'roslib', 'rospy', 'gazebo',
'gazebo.srv', 'matplotlib', 'matplotlib.pyplot', 'matplotlib.cbook', 'std_msgs',
'std_msgs.msg', 'tf', 'tf.transformations', 'matplotlib.backends',
'matplotlib.backends.backend_tkagg', 'matplotlib.figure', 'matplotlib.axes',
'matplotlib.transforms', 'pycudd']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
| gpl-3.0 |
emhuff/regularizedInversion | testInference_nd.py | 1 | 15630 | #!/usr/bin/env python
import matplotlib as mpl
#mpl.use('Agg')
import argparse
import matplotlib.pyplot as plt
import lfunc
import mapfunc
import sys
import numpy as np
import healpy as hp
import esutil
import numpy.lib.recfunctions as recfunctions
def fluxToMag(mag, zeropoint = 25.):
return 10.**((zeropoint - mag)/2.5)
def magToFlux(flux, zeropoint = 25.):
mag = zeropoint - 2.5*np.log10(flux)
return mag
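# Note on naming: despite what the names suggest, fluxToMag() above converts a
# magnitude to a flux and magToFlux() converts a flux to a magnitude; the callers
# below rely on this convention (e.g. with zeropoint 25, fluxToMag(25.) == 1.).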
def addMagnitudes(mag1, mag2, zeropoint = 25.):
flux1 = fluxToMag(mag1, zeropoint = zeropoint)
flux2 = fluxToMag(mag2, zeropoint = zeropoint)
newFlux = flux1 + flux2
newMag = zeropoint - 2.5*np.log10(newFlux)
return newMag
def generateGalaxyTruthCatalog(n_obj = 10000, slope = 2.0, starting_index = 0):
# Draw catalog entries from some simple distribution.
# (A catalog should be a Numpy recarray, or similar, so we can get arrays indexed by keyword.)
# (the catalog should have a 'mag' and an 'index' field)
mag = []
while len(mag) < n_obj:
thismag = 15 + 15*np.random.power(1+slope)
if np.random.rand() > np.exp((thismag - 26)/3.):
mag.append(thismag)
mag = np.array(mag)
log_size = (-0.287* mag + 4.98) + (0.0542* mag - 0.83 )* np.random.randn(n_obj)
flux = fluxToMag(mag)
size = np.exp(log_size) # Fortunately, this is basically in arcsec
surfaceBrightness = flux / size / size / np.pi
# A typical sky brightness is 22.5 mag / arcsec^2
sky_sb = np.repeat( 10.**( -(21.5 - 22.5) / 2.5 ), n_obj)
sky_flux = np.pi * size * size * sky_sb
# The flux calibration will be total photon counts per unit of flux, integrated over the exposure time.
# It's really just there to give us a noise model.
calibration = np.repeat(100.,n_obj)
#error = np.sqrt( (sky_flux + flux) * calibration ) / calibration
index = np.arange(int(n_obj)) + starting_index
catalog = np.empty((n_obj),dtype=[('mag',mag.dtype),('balrog_index',index.dtype),
('size',size.dtype),
('flux',flux.dtype),
('stellarity',np.int),
('calibration',calibration.dtype),
('blended',type(True))])
catalog['mag'] = mag
catalog['flux'] = flux
catalog['balrog_index'] = index
catalog['size'] = size
catalog['blended'] = False
catalog['calibration'] = calibration
catalog['stellarity'] = 0
catalog = recfunctions.append_fields(catalog, 'data_truth', catalog['mag'])
catalog = np.array(catalog)
return catalog
def generateStarTruthCatalog(n_obj = 10000, slope = 1.0, starting_index = 0):
stars = generateGalaxyTruthCatalog(n_obj = n_obj, slope = slope)
stars['size'] = 0.
stars['stellarity'] = 1
return stars
def blend(catalog, blend_fraction=0.1):
# Choose two random subsets of the galaxies.
# Add the flux of the first subset to that of the second, then remove the first.
subset1 = np.random.choice(catalog,np.round(blend_fraction*catalog.size),replace=False)
subset2 = np.random.choice(catalog,np.round(blend_fraction*catalog.size),replace=False)
for entry1, entry2 in zip(subset1, subset2):
newMag = addMagnitudes(entry1['mag'], entry2['mag'])
ii = (catalog['balrog_index'] == entry1['balrog_index'])
        # Index the field first so the assignment writes into `catalog` itself;
        # boolean-indexing the structured array first would only modify a copy.
        catalog['mag'][ii] = newMag
        catalog['flux'][ii] = magToFlux(newMag)
        catalog['blended'][ii] = True
        catalog['size'][ii] = np.max( (entry1['size'], entry2['size']) )
        catalog['stellarity'][ii] = 0
keep = np.in1d(catalog['balrog_index'], subset2['balrog_index'], assume_unique=True, invert=True)
if np.sum(keep) == 0:
stop
catalog = catalog[keep]
return catalog
def applyTransferFunction(catalog, SN_cut = 5., cbias = 0.0, mbias = 0.0, blend_fraction = 0.00, psf_size = 0.00):
# This will add noise to the catalog entries, and apply cuts to remove some unobservable ones.
# The noise is not trivial.
obs_catalog = catalog.copy()
# Apply psf.
size_obs = np.sqrt( psf_size**2 + obs_catalog['size']**2 )
obs_catalog['size'] = size_obs
sky_sb = np.repeat( 10.**( -(21.5 - 22.5) / 2.5 ), catalog.size)
sky_flux = np.pi * size_obs * size_obs * sky_sb
# Generate a noise vector based on the errors.
flux_error = np.sqrt( (sky_flux + obs_catalog['flux']) * obs_catalog['calibration'] ) / obs_catalog['calibration']
size_error = 2 * size_obs * flux_error / obs_catalog['flux']
flux_noise = flux_error * np.random.standard_cauchy(size=len(obs_catalog))
size_noise1 = size_error * np.random.randn(len(obs_catalog))
size_noise2 = size_error * np.random.randn(len(obs_catalog))
newFlux = obs_catalog['flux'] + flux_noise
newSize = np.sqrt( ( (size_obs + size_noise1)**2 + (size_obs + size_noise2)**2 )/2)
newMag = 25. - 2.5*np.log10(newFlux)
obs_catalog['mag'] = newMag
obs_catalog['flux'] = newFlux
obs_catalog['size'] = newSize
# Now recalculate the surface brightness.
SB_new = newFlux / newSize**2 / np.pi + sky_sb
# Apply a selection based on the new, noisy surface brightness. Take things that are >5 sigma above sky.
# Blend. This happens after everything else.
if blend_fraction > 0.0:
obs_catalog = blend(obs_catalog, blend_fraction = blend_fraction)
obs_catalog = obs_catalog[(SB_new > 3. * sky_sb)]
return obs_catalog
def generateTruthCatalog(n_gal = 10000, n_star = 1000, gal_slope = 2.5, star_slope = 1.2):
stars = generateStarTruthCatalog(n_obj = n_star, slope = star_slope)
maxIndex = np.max(stars['balrog_index'])
galaxies = generateGalaxyTruthCatalog(n_obj = n_gal, slope = gal_slope, starting_index = maxIndex+1)
catalog = np.hstack( (stars, galaxies) )
np.random.shuffle(catalog)
return catalog
def main(argv):
# Generate a simulated simulated truth catalog.
psf_size = 0.5
catalog_calib_truth = generateTruthCatalog(n_gal = 2e5, n_star = 2e4, gal_slope = 2.5, star_slope = 1.20)
catalog_calib_obs = applyTransferFunction(catalog_calib_truth, psf_size = psf_size)
catalog_sim_truth = generateTruthCatalog(n_gal = 2e5, n_star = 2e4, gal_slope = 2.0, star_slope = 1.00)
catalog_sim_obs = applyTransferFunction(catalog_sim_truth, psf_size = psf_size)
ind1, ind2 = esutil.numpy_util.match(catalog_calib_truth['balrog_index'],catalog_calib_obs['balrog_index'])
stars = catalog_calib_obs['stellarity'] == 1
gals = catalog_calib_obs['stellarity'] == 0
plt.plot(catalog_calib_obs['size'][gals], catalog_calib_obs['mag'][gals],',',color='green')
plt.plot(catalog_calib_obs['size'][stars], catalog_calib_obs['mag'][stars],',',color='blue')
plt.axvline(psf_size,linestyle='--',color='red')
plt.gca().invert_yaxis()
plt.show(block=False)
bins = np.linspace( 0.3,1.0, 300)
plt.hist(catalog_calib_obs['size'],bins=bins,color='blue',label='all')
plt.hist(catalog_calib_obs[catalog_calib_obs['stellarity'] == 0]['size'],bins=bins,color='yellow',label='galaxies',alpha=0.5)
plt.hist(catalog_calib_obs[catalog_calib_obs['stellarity'] == 1]['size'],bins=bins,color='orange',label='stars',alpha=0.5)
plt.axvline(psf_size*1.04,linestyle='--',color='red')
plt.xlim([0.33,1.0])
plt.legend(loc='best')
plt.show(block=False)
bins = np.linspace( 15, 23., 200)
plt.hist(catalog_calib_obs['mag'],bins=bins,color='blue',label='all')
plt.hist(catalog_calib_obs[catalog_calib_obs['stellarity'] == 0]['mag'],bins=bins,color='yellow',label='galaxies',alpha=0.5)
plt.hist(catalog_calib_obs[catalog_calib_obs['stellarity'] == 1]['mag'],bins=bins,color='orange',label='stars',alpha=0.5)
plt.legend(loc='best')
plt.show(block=False)
obsStar = catalog_calib_obs['size'] <= psf_size * 1.02
obsGal = catalog_calib_obs['size'] > psf_size * 1.02
catalog_calib_obs['stellarity'][obsStar] = 1
catalog_calib_obs['stellarity'][obsGal] = 0
truthMatched = catalog_calib_truth.copy()
truthMatched = truthMatched[ind1]
catalog_calib_obs = catalog_calib_obs[ind2]
obsMagBins = np.linspace(15,23,30)
truthMagBins = np.linspace(15,23,30)
starBins = np.array([-1, 0.5, 2])
reconBins = [truthMagBins, starBins]
obsBins = [obsMagBins, starBins]
obsStar = catalog_sim_obs['size'] <= psf_size * 1.02
obsGal = catalog_sim_obs['size'] > psf_size * 1.02
catalog_sim_obs['stellarity'][obsStar] = 1
catalog_sim_obs['stellarity'][obsGal] = 0
fig, (ax1, ax2) = plt.subplots(nrows = 1, ncols = 2, figsize = (14,7))
in_var = truthMatched['mag'] + 1.01*np.max(truthMatched['mag']) * truthMatched['stellarity']
out_var = catalog_calib_obs['mag'] + 1.01*np.max(catalog_calib_obs['mag']) * catalog_calib_obs['stellarity']
ax1.plot(in_var, out_var,',')
ax1.set_xlabel('truth mag/stellarity')
ax1.set_ylabel('obs mag/stellarity')
L = lfunc.makeLikelihoodMatrix(sim= catalog_calib_obs, truth=catalog_calib_truth, truthMatched = truthMatched,
obs_bins = obsBins, truth_bins = reconBins, simTag = ['mag','stellarity'],
truthTag = ['mag', 'stellarity'])
ax2.imshow(np.arcsinh(L/0.001), origin='lower', cmap=plt.cm.Greys)
ax2.set_xlabel('truth mag/stellarity')
ax2.set_ylabel('obs mag/stellarity')
fig.savefig("nd-likelihood_test-mag_stellarity.png")
plt.show(block=False)
N_sim_obs, _ = np.histogramdd([catalog_sim_obs['mag'],catalog_sim_obs['stellarity']], bins = obsBins)
N_obs_plot,_ = np.histogramdd([catalog_sim_obs['mag'],catalog_sim_obs['stellarity']], bins = reconBins)
N_sim_truth, _ = np.histogramdd([catalog_sim_truth['mag'], catalog_sim_truth['stellarity']], bins= reconBins)
obsShape = N_sim_obs.shape
truthShape = N_sim_truth.shape
N_sim_obs_flat = np.ravel(N_sim_obs, order='F')
N_sim_truth_flat = np.ravel(N_sim_truth, order='F')
A = L.copy()
lambda_reg = 0.0001
Ainv = np.dot( np.linalg.pinv(np.dot(A.T, A) + lambda_reg * np.identity(N_sim_truth_flat.size ) ), A.T)
#Ainv = np.linalg.pinv(A)
N_real_truth_flat = np.dot(Ainv, N_sim_obs_flat)
N_real_truth = np.reshape(N_real_truth_flat, truthShape, order='F')
plt.plot( (truthMagBins[0:-1] + truthMagBins[1:])/2. , N_real_truth[:,0],'--', label='galaxies (est.)')
plt.plot( (truthMagBins[0:-1] + truthMagBins[1:])/2. , N_real_truth[:,1],'--', label = 'stars (est)')
plt.plot( (truthMagBins[0:-1] + truthMagBins[1:] )/2. , N_obs_plot[:,0], '.', label='galaxies (obs.)')
plt.plot( (truthMagBins[0:-1] + truthMagBins[1:] )/2. , N_obs_plot[:,1], '.', label='stars (obs.)')
N_gal_hist, _ = np.histogram(catalog_sim_truth['mag'][catalog_sim_truth['stellarity'] == 0],bins=truthMagBins)
N_star_hist, _ = np.histogram(catalog_sim_truth['mag'][catalog_sim_truth['stellarity'] == 1], bins=truthMagBins)
plt.plot( (truthMagBins[0:-1] + truthMagBins[1:])/2. , N_gal_hist , label='galaxies (true)')
plt.plot( (truthMagBins[0:-1] + truthMagBins[1:])/2., N_star_hist, label='stars (true)')
plt.legend(loc='best')
plt.yscale('log')
plt.ylim([1,3.*np.max(N_sim_truth)])
plt.savefig("nd-reconstruction_test-mag_stellarity.png")
plt.show(block=False)
# --------------------------------------------------
obsMagBins = np.linspace(15,23,30)
truthMagBins = np.linspace(15,25,35)
obsSizeBins = np.linspace(0, 2, 30)
truthSizeBins = np.linspace(0,2,35)
obsMagBins_cen = ( obsMagBins[0:-1] + obsMagBins[1:] )/2.
truthMagBins_cen = ( truthMagBins[0:-1] + truthMagBins[1:] ) /2.
obsSizeBins_cen = ( obsSizeBins[0:-1] + obsSizeBins[1:] ) / 2.
truthSizeBins_cen = ( truthSizeBins[0:-1] + truthSizeBins[1:] ) /2.
reconBins = [truthMagBins, truthSizeBins]
obsBins = [obsMagBins, obsSizeBins]
fig, ax2 = plt.subplots(nrows = 1, ncols = 1)
L = lfunc.makeLikelihoodMatrix(sim= catalog_calib_obs, truth=catalog_calib_truth, truthMatched = truthMatched,
obs_bins = obsBins, truth_bins = reconBins, simTag = ['mag','size'],
truthTag = ['mag', 'size'])
ax2.imshow(np.arcsinh(L/0.001), origin='lower', cmap=plt.cm.Greys)
ax2.set_xlabel('truth mag/size')
ax2.set_ylabel('obs mag/size')
fig.savefig("nd-likelihood_test-mag_size.png")
plt.show(block=False)
N_sim_truth, _ = np.histogramdd([catalog_sim_truth['mag'], catalog_sim_truth['size']], bins= reconBins)
N_sim_obs, _ = np.histogramdd([catalog_sim_obs['mag'],catalog_sim_obs['size']], bins = obsBins)
N_obs_plot, _ = np.histogramdd([catalog_sim_obs['mag'],catalog_sim_obs['size']], bins = reconBins)
truthShape = N_sim_truth.shape
N_sim_obs_flat = np.ravel(N_sim_obs, order='F')
N_sim_truth_flat = np.ravel(N_sim_truth, order='F')
A = L.copy()
lambda_reg = 0.01
Ainv = np.dot( np.linalg.pinv(np.dot(A.T, A) + lambda_reg * np.identity(N_sim_truth_flat.size ) ), A.T)
N_real_truth_flat = np.dot(Ainv, N_sim_obs_flat)
N_real_truth = np.reshape(N_real_truth_flat, truthShape, order='F')
fig, ( (ax1, ax2, ax3), (ax4, ax5, _)) = plt.subplots(nrows=2,ncols=3,figsize=(19,13))
ax1.set_xlabel('size (arcsec)')
ax1.set_ylabel('mag')
im1 = ax1.imshow(np.arcsinh(N_sim_truth/0.01),origin='lower',cmap=plt.cm.Greys, extent = [truthSizeBins_cen[0],truthSizeBins_cen[-1],truthMagBins_cen[0],truthMagBins_cen[-1]], aspect = 'auto')
ax1.set_title('truth')
im2 = ax2.imshow(np.arcsinh(N_real_truth/0.01), origin='lower',cmap=plt.cm.Greys,vmin=0., extent = [truthSizeBins_cen[0],truthSizeBins_cen[-1],truthMagBins_cen[0],truthMagBins_cen[-1]], aspect = 'auto')
ax2.set_title('reconstruction')
im3 = ax3.imshow(np.arcsinh(N_obs_plot/0.01),origin='lower',cmap=plt.cm.Greys, extent = [truthSizeBins_cen[0],truthSizeBins_cen[-1],truthMagBins_cen[0],truthMagBins_cen[-1]], aspect = 'auto')
ax3.set_title('uncorrected observations')
im4 = ax4.imshow(np.arcsinh(( N_real_truth / N_sim_truth-1 )),origin='lower',cmap=plt.cm.seismic, extent = [truthSizeBins_cen[0],truthSizeBins_cen[-1],truthMagBins_cen[0],truthMagBins_cen[-1]], aspect = 'auto', vmax=5, vmin = -5)
ax4.set_title('reconstr. / truth -1 \n (frac. residuals)')
im5 = ax5.imshow(np.arcsinh(( N_obs_plot / N_sim_truth-1 )),origin='lower',cmap=plt.cm.seismic, extent = [truthSizeBins_cen[0],truthSizeBins_cen[-1],truthMagBins_cen[0],truthMagBins_cen[-1]], aspect = 'auto', vmax=5, vmin = -5)
ax5.set_title('observed / truth -1 \n (frac. residuals)')
fig.colorbar(im1,ax=ax1)
ax1.axvline(psf_size,color='green',linestyle='--',alpha=0.75)
ax2.axvline(psf_size,color='green',linestyle='--',alpha=0.75)
ax3.axvline(psf_size,color='green',linestyle='--',alpha=0.75)
ax4.axvline(psf_size,color='green',linestyle='--',alpha=0.75)
ax5.axvline(psf_size,color='green',linestyle='--',alpha=0.75)
fig.colorbar(im2,ax=ax2)
fig.colorbar(im3,ax=ax3)
fig.colorbar(im4,ax=ax4)
fig.colorbar(im5,ax=ax5)
fig.savefig('nd-reconstruction_test-mag_size.png')
fig.show()
#--------------------------------------------------
stop
if __name__ == "__main__":
import pdb, traceback
try:
main(sys.argv)
except:
thingtype, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
| mit |
fredhusser/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
RosesTheN00b/BudgetButlerWeb | butler_offline/core/database/sparen/orderdauerauftrag.py | 1 | 3968 | from butler_offline.core.database.database_object import DatabaseObject
from butler_offline.core.frequency import get_function_for_name
from datetime import datetime
import pandas as pd
from datetime import date
class OrderDauerauftrag(DatabaseObject):
TABLE_HEADER = ['Startdatum', 'Endedatum', 'Rhythmus', 'Name', 'Konto', 'Depotwert', 'Wert']
def __init__(self):
super().__init__(self.TABLE_HEADER)
def add(self, startdatum, endedatum, rhythmus, name, konto, depotwert, wert):
neue_order = pd.DataFrame(
[[startdatum, endedatum, rhythmus, name, konto, depotwert, wert]],
columns=self.TABLE_HEADER)
self.content = self.content.append(neue_order, ignore_index=True)
self.taint()
self._sort()
def get_all(self):
return self.content
def parse(self, raw_table):
raw_table['Startdatum'] = raw_table['Startdatum'].map(lambda x: datetime.strptime(x, '%Y-%m-%d').date())
raw_table['Endedatum'] = raw_table['Endedatum'].map(lambda x: datetime.strptime(x, '%Y-%m-%d').date())
self.content = self.content.append(raw_table, ignore_index=True)
self.content = self.content.sort_values(by=['Startdatum'])
def edit(self, index, startdatum, endedatum, rhythmus, name, konto, depotwert, wert):
self.edit_element(index, {
'Startdatum': startdatum,
'Endedatum': endedatum,
'Rhythmus': rhythmus,
'Name': name,
'Konto': konto,
'Depotwert': depotwert,
'Wert': wert
})
def get_all_order_until_today(self):
all_rows = pd.DataFrame()
for _, row in self.content.iterrows():
dauerauftrag_buchungen = self._order_until_today(
row['Startdatum'],
row['Endedatum'],
row['Rhythmus'],
row['Name'],
row['Konto'],
row['Depotwert'],
row['Wert'])
for buchung in dauerauftrag_buchungen:
all_rows = all_rows.append(buchung, ignore_index=True)
return all_rows
def _order_until_today(self,
startdatum,
endedatum,
frequenzfunktion,
name,
konto,
depotwert,
wert,):
laufdatum = startdatum
frequency_function = get_function_for_name(frequenzfunktion)
result = []
while laufdatum < date.today() and laufdatum < endedatum:
abbuchung = self._berechne_order(laufdatum, konto, depotwert, name, wert)
result.append(abbuchung)
laufdatum = frequency_function(laufdatum)
return result
def _berechne_order(self, laufdatum, konto, depotwert, name, wert):
return pd.DataFrame([[laufdatum, konto, depotwert, name, wert, True]],
columns=['Datum', 'Konto', 'Depotwert' , 'Name', 'Wert', 'Dynamisch'])
def _sort(self):
self.content = self.content.sort_values(by=['Startdatum', 'Endedatum', 'Name'])
self.content = self.content.reset_index(drop=True)
def aktuelle_raw(self):
dauerauftraege = self.content.copy()
dauerauftraege = dauerauftraege[dauerauftraege.Endedatum > date.today()]
return dauerauftraege[dauerauftraege.Startdatum < date.today()]
def aktuelle(self):
return self.frame_to_list_of_dicts(self.aktuelle_raw())
def past(self):
dauerauftraege = self.content.copy()
dauerauftraege = dauerauftraege[dauerauftraege.Endedatum < date.today()]
return self.frame_to_list_of_dicts(dauerauftraege)
def future(self):
dauerauftraege = self.content.copy()
dauerauftraege = dauerauftraege[dauerauftraege.Startdatum > date.today()]
return self.frame_to_list_of_dicts(dauerauftraege)
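# Hedged usage sketch (illustrative values; the rhythm string must be a name that
# get_function_for_name() understands -- 'monatlich' is only an assumption here):
#   orders = OrderDauerauftrag()
#   orders.add(date(2020, 1, 1), date(2021, 1, 1), 'monatlich',
#              'ETF Sparplan', 'Depot', 'MSCI World', 100)
#   orders.get_all_order_until_today()  # one booking row per elapsed interval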
| agpl-3.0 |
khkaminska/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
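    # The folder is expected to follow the layout consumed by sklearn.datasets.load_files:
    # one sub-directory per class (e.g. pos/ and neg/ for the movie review dataset),
    # each containing one text file per document.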
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
totalgood/twip | docs/notebooks/06 Features -- Numeric.py | 1 | 1771 |
# coding: utf-8
# In[3]:
from __future__ import division, print_function, absolute_import
from past.builtins import basestring
import os
import pandas as pd
from matplotlib import pyplot as plt
from twip.constant import DATA_PATH
# In[4]:
import matplotlib
from IPython.display import display, HTML
get_ipython().magic(u'matplotlib inline')
np = pd.np
display(HTML("<style>.container { width:100% !important; }</style>"))
pd.set_option('display.max_rows', 6)
pd.set_option('display.max_columns', 250)
get_ipython().magic(u'pprint')
# In[5]:
df = pd.read_csv(os.path.join(DATA_PATH, 'cleaned_tweets.csv.gz'), low_memory=False)
rawlen = len(df)
df.drop_duplicates('id_str', keep='last', inplace=True)
rawlen - len(df)
# In[5]:
# df.get_
df.get_dtype_counts()
# In[9]:
dtypes = pd.Series([df[v].dtype for v in df.columns], index=df.columns)
dtypes
# In[6]:
mask = [col for col in df.columns if df[col].dtype in (bool, float, np.dtype('int64'))] # don't forget bool and int64 (not int)!
mask
numbers = df[mask]
numbers
# In[8]:
import gzip
with gzip.open(os.path.join(DATA_PATH, 'numbers.csv.gz'), 'wb') as f:
    numbers.to_csv(f, encoding='utf-8')
# In[6]:
numbers = pd.read_csv(os.path.join(DATA_PATH, 'numbers.csv.gz'), compression='gzip', engine='python')
# In[7]:
[col for col in numbers.columns if 'follow' in col]
# In[11]:
numbers.columns = [col.replace(' ', '_') for col in numbers.columns]
# In[12]:
cols = [col for col in numbers.columns if 'follow' in col]
# In[19]:
numbers.user_followers_count.hist()
plt.yscale('log', nonposy='clip')
plt.ylabel('Tweets')
plt.xlabel('Followers')
# In[20]:
# group by user ID before doing plots based on user stats like followers
# In[ ]:
| mit |
joristork/milovision | main.py | 1 | 3082 | #!/usr/bin/env python
#
# Milovision: A camera pose estimation programme
#
# Copyright (C) 2013 Joris Stork
# See LICENSE.txt
#
# main.py
""" :synopsis: Contains the main initialising function but not the main
application loop, which is in pipeline.py. "milovision" is the
code name for this project.
.. moduleauthor:: joris stork <[email protected]>
"""
# standard and third party libraries
import sys
import os
import logging
import cv2
import signal
from pydc1394 import DC1394Library, camera
from pydc1394.cmdline import add_common_options, handle_common_options
import matplotlib.pyplot as plt
import numpy as np
import multiprocessing
# milovision libraries
from pipeline import Pipeline
from admin_modules import loginit
from admin_modules import argparse
from output import Printer
pipeline = None
def signal_handler(signal, frame):
""" enables clean shutdown with ctrl-c """
process_id = multiprocessing.current_process().name
if process_id == 'child':
return
    logger = logging.getLogger('signal_handler')
logger.info('ctrl-c received.')
logger.info('telling pipeline to shutdown')
global pipeline
pipeline.shutdown()
def main():
"""
Parses arguments; initialises logger; initialises camera driver if
necessary; loads single image from disk if necessary; and runs desired parts
of pipeline, or loads output from previous execution for printout.
"""
options, args = argparse.run()
loginit.run(options.verbosity)
logger = logging.getLogger('main')
logger.info(' '.join(sys.argv[1:]))
if options.simulate == 0:
options.simulate = None
l = DC1394Library()
elif options.simulate > 0:
options.simulate -= 1
elif options.simtime is None:
options.simtime = 36000
global pipeline
pipeline = Pipeline(options)
if options.disk:
logger.info('using poses from disk')
pipe = Pipeline()
pipe.options = options
printer = Printer(pipe=pipe)
printer.final()
logger.info('done. exiting')
sys.exit(0)
if args:
try:
image = cv2.imread('images/'+args[0], cv2.CV_LOAD_IMAGE_GRAYSCALE)
pipeline.set_image(image)
logger.info('opening image file %s from disk' % args[0])
except IOError:
logger.error('image file not found: %s' % args[0])
exit(1)
elif options.simulate is not None:
logger.info('running in simulation mode')
else:
try:
fwcam = handle_common_options(options, l)
pipeline.set_fwcam(fwcam)
logger.info('init. pydc1394 camera object')
logger.info('camera: %s' % fwcam.model)
logger.info('mode: %s' % fwcam.mode)
logger.info('framerate: %d' % fwcam.framerate.val)
except:
logger.error('unable to open camera capture')
exit(1)
pipeline.run()
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
main()
| mit |
selective-inference/selective-inference | doc/examples/conditional_sampling.py | 3 | 2327 | """
We demonstrate that our optimization variables have
the correct distribution given the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.distributions import ECDF
from selection.randomized.tests.test_sampling import test_conditional_law
def main(ndraw=50000, burnin=5000, remove_atom=False, unpenalized=True, stepsize=1.e-2):
fig_idx = 0
for (rand,
mcmc_opt,
mcmc_omega,
truncated_opt,
truncated_omega) in test_conditional_law(ndraw=ndraw, burnin=burnin, stepsize=stepsize, unpenalized=unpenalized):
fig_idx += 1
fig = plt.figure(num=fig_idx, figsize=(8,8))
plt.clf()
idx = 0
for i in range(mcmc_opt.shape[1]):
plt.subplot(3,3,idx+1)
mcmc_ = mcmc_opt[:, i]
truncated_ = truncated_opt[:, i]
xval = np.linspace(min(mcmc_.min(), truncated_.min()),
max(mcmc_.max(), truncated_.max()),
200)
if remove_atom:
mcmc_ = mcmc_[mcmc_ < np.max(mcmc_)]
mcmc_ = mcmc_[mcmc_ > np.min(mcmc_)]
plt.plot(xval, ECDF(mcmc_)(xval), label='MCMC')
plt.plot(xval, ECDF(truncated_)(xval), label='truncated')
idx += 1
if idx == 1:
plt.legend(loc='lower right')
fig.suptitle(' '.join([rand, "opt"]))
fig_idx += 1
fig = plt.figure(num=fig_idx, figsize=(8,8))
plt.clf()
idx = 0
for i in range(mcmc_opt.shape[1]):
plt.subplot(3,3,idx+1)
mcmc_ = mcmc_omega[:, i]
truncated_ = truncated_omega[:, i]
xval = np.linspace(min(mcmc_.min(), truncated_.min()),
max(mcmc_.max(), truncated_.max()),
200)
if remove_atom:
mcmc_ = mcmc_[mcmc_ < np.max(mcmc_)]
mcmc_ = mcmc_[mcmc_ > np.min(mcmc_)]
plt.plot(xval, ECDF(mcmc_)(xval), label='MCMC')
plt.plot(xval, ECDF(truncated_)(xval), label='truncated')
idx += 1
if idx == 1:
plt.legend(loc='lower right')
fig.suptitle(' '.join([rand, "omega"]))
plt.show()
| bsd-3-clause |
RachitKansal/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 114 | 25281 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
# against nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
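# --- Illustrative sketch (not part of the original test suite) ---
# The warm-start behaviour checked above, in a minimal self-contained form:
# refitting an already-converged ElasticNet with warm_start=True reuses the
# previous coefficients, so the solver only needs a single extra pass.
# The helper name is local to this sketch and deliberately not prefixed with
# "test_" so it is not collected by the test runner.
def _warm_start_sketch():
    import numpy as np
    from sklearn.linear_model import ElasticNet
    rng = np.random.RandomState(0)
    X, y = rng.randn(60, 20), rng.randn(60)
    model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
    cold_iters = model.n_iter_
    model.set_params(warm_start=True)
    model.fit(X, y)  # starts from the converged solution
    assert model.n_iter_ <= cold_iters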
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprection_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong format,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
# With no input checking, providing X in C order should result in
# incorrect computation
X = check_array(X, order='C', dtype='float64')
clf.fit(X, y, check_input=False)
coef_false = clf.coef_
clf.fit(X, y, check_input=True)
coef_true = clf.coef_
assert_raises(AssertionError, assert_array_almost_equal,
coef_true, coef_false)
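# --- Illustrative sketch (not part of the original test suite) ---
# check_input=False skips validation inside ElasticNet.fit, so the caller is
# responsible for handing over Fortran-ordered float64 data, e.g. by running
# check_array once up front. A minimal sketch of the safe pattern; all names
# are local to this sketch.
def _check_input_false_sketch():
    import numpy as np
    from sklearn.linear_model import ElasticNet
    from sklearn.utils import check_array
    rng = np.random.RandomState(0)
    X = check_array(rng.randn(20, 10), order='F', dtype='float64')
    y = np.ascontiguousarray(rng.randn(20), dtype='float64')
    ElasticNet(tol=1e-8).fit(X, y, check_input=False)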
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
| bsd-3-clause |
mlperf/training_results_v0.7 | Inspur/benchmarks/transformer/implementations/implementation_closed/logging/log_parsers/parse_mlperf.py | 1 | 13935 | # Copyright 2018 The MLPerf Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import csv
import sys
import re
import argparse
from datetime import datetime, timedelta
# Third-party modules
import pytz
import plotly.graph_objects as pgo
# Global Variables
# g_*_tz : to help with host/dut TZ differences
# g_power*td : manual tweaking of timedelta, adding or subtracting by seconds from the power log timestamps
# g_power_window* : how much time before (BEGIN) and after (END) loadgen timestamps to show data in graph
# g_power_stats* : when to start (after BEGIN) and end (before END) loadgen timestamps to calculate statistical data of graph
g_power_tz = None # pytz.timezone( 'US/Pacific' )
g_loadgen_tz = None # pytz.utc
g_power_add_td = timedelta(seconds=3600)
g_power_sub_td = timedelta(seconds=0)
g_power_window_before_td = timedelta(seconds=30)
g_power_window_after_td = timedelta(seconds=30)
g_power_stats_begin_td = timedelta(seconds=3)
g_power_stats_end_td = timedelta(seconds=3)
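# --- Illustrative sketch (assumptions flagged inline) ---
# How the globals above are combined when comparing power-log timestamps with
# loadgen timestamps (this mirrors the expression used in
# f_graph_powerOverTime): the power-log time is tagged with g_power_tz,
# converted to g_loadgen_tz, then nudged by the manual add/sub timedeltas.
# The sample timestamp and the helper name are made up for illustration.
def _normalize_power_ts_example():
    m_raw = datetime.fromisoformat("2020-01-13 10:00:00.000")
    m_norm = (m_raw.replace(tzinfo=g_power_tz)).astimezone(g_loadgen_tz)
    return m_norm + g_power_add_td - g_power_sub_td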
# Check command-line parameters and call respective functions
def main():
m_args = f_parseParameters()
if( m_args.loadgen_in != "" ):
f_parseLoadgen( m_args.loadgen_in, m_args.loadgen_out )
if( m_args.power_in != "" ):
f_parsePowerlog( m_args.power_in, m_args.power_out )
if( m_args.graph ):
f_graph_powerOverTime( m_args.loadgen_out, m_args.power_out )
# if( m_args.tmp != "" ):
# f_parseTemplog( m_args )
def f_graph_powerOverTime( p_loadgen_csv, p_power_csv ):
global g_power_tz
global g_loadgen_tz
global g_power_add_td
global g_power_sub_td
global g_power_window_before_td
global g_power_window_after_td
m_figure_volts = pgo.Figure() # title="Voltage (V) over Time" )
m_figure_watts = pgo.Figure() # title="Power (W) over Time" )
m_figure_amps = pgo.Figure() # title="Current (A) over Time" )
m_loadgen_data = []
m_power_data = []
m_workname = ""
m_scenario = ""
m_testmode = ""
m_power_state = ""
m_loadgen_ts = ""
m_power_ts = ""
# Parse loadgen data
try:
print( f"graph: opening {p_loadgen_csv} for reading..." )
m_file = open( p_loadgen_csv, 'r' )
except:
print( f"graph: error opening file: {p_loadgen_csv}" )
exit(1)
# loadgen CSV must contain BEGIN and END timestamps
for m_line in m_file:
if( re.search("BEGIN", m_line) or re.search("END", m_line) ):
(m_workname, m_scenario, m_testmode, m_power_state, m_loadgen_ts, m_power_ts) = m_line.split(",", 5)
m_datetime = datetime.fromisoformat( m_power_ts.strip() )
m_loadgen_data.append( [m_workname, m_scenario, m_testmode, m_power_state, m_loadgen_ts, m_datetime] )
# Parse power data
try:
print( f"graph: opening {p_power_csv} for reading..." )
m_file = open( p_power_csv, 'r' )
# m_power_data = pandas.read_csv( p_power_csv )
except:
print( f"graph: error opening file: {p_power_csv}", )
exit(1)
# power CSV must contain time and power
# skip first line of headers
next( m_file )
for m_line in m_file:
(m_date, m_time, m_power, m_volt, m_amps) = m_line.split(",")[:5]
m_datetime = datetime.fromisoformat( m_date + " " + m_time )
m_power_data.append( [m_datetime, m_power, m_volt, m_amps] )
m_loadgen_iter = iter( m_loadgen_data )
m_power_iter = iter( m_power_data )
for m_loadgen_entry in m_loadgen_iter:
m_trace_x_time = []
m_trace_y_watt = []
m_trace_y_volt = []
m_trace_y_amps = []
m_power_ts_begin = (m_loadgen_entry[5]).astimezone(g_loadgen_tz)
m_power_ts_end = (next(m_loadgen_iter)[5]).astimezone(g_loadgen_tz)
# print( m_power_ts_begin.strftime("%Y-%m-%D %H:%M:%S.%f")[:-3], "to", m_power_ts_end.strftime("%Y-%m-%D %H:%M:%S.%f")[:-3] )
m_counter = 0
for m_power_entry in m_power_iter:
#print( m_power_entry[0].strftime("%Y-%m-%D %H:%M:%S.%f")[:-3], m_power_ts_begin.strftime("%Y-%m-%D %H:%M:%S.%f")[:-3], m_power_ts_end.strftime("%Y-%m-%D %H:%M:%S.%f")[:-3] )
m_power_entry_ts = (m_power_entry[0].replace(tzinfo=g_power_tz)).astimezone(g_loadgen_tz) + g_power_add_td - g_power_sub_td
if( m_power_entry_ts < (m_power_ts_begin - g_power_window_before_td) ):
continue
if( m_power_entry_ts > (m_power_ts_end + g_power_window_after_td) ) :
break
# because of limitations of datetime, offset date by a fixed date
m_trace_x_time.append( datetime(2011,1,13) + (m_power_entry_ts - m_power_ts_begin) )
m_trace_y_watt.append( m_power_entry[1] )
m_trace_y_volt.append( m_power_entry[2] )
m_trace_y_amps.append( m_power_entry[3] )
m_counter = m_counter + 1
if( m_counter ):
m_figure_watts.add_trace( pgo.Scatter( x=m_trace_x_time, y=m_trace_y_watt,
mode="lines+markers",
name=f"{m_loadgen_entry[0]}, {m_loadgen_entry[1]}" ) )
m_figure_volts.add_trace( pgo.Scatter( x=m_trace_x_time, y=m_trace_y_volt,
mode="lines+markers",
name=f"{m_loadgen_entry[0]}, {m_loadgen_entry[1]}" ) )
m_figure_amps.add_trace( pgo.Scatter( x=m_trace_x_time, y=m_trace_y_amps,
mode="lines+markers",
name=f"{m_loadgen_entry[0]}, {m_loadgen_entry[1]}" ) )
m_figure_volts.update_layout( title={'text' : "Voltage over Time",
'x' : 0.5,
'y' : 0.95,
'xanchor': 'center',
'yanchor': 'top' },
xaxis_title="Time (offset between powerlog & loadgen timestamps)",
xaxis_tickformat='%H:%M:%S.%L',
yaxis_title="Volts (V)" )
m_figure_watts.update_layout( title={ 'text' : "Power over Time",
'x' : 0.5,
'y' : 0.95,
'xanchor': 'center',
'yanchor': 'top' },
xaxis_title="Time (offset between powerlog & loadgen timestamps)",
xaxis_tickformat='%H:%M:%S.%L',
yaxis_title="Watts (W)" )
m_figure_amps.update_layout( title={'text' : "Current over Time",
'x' : 0.5,
'y' : 0.95,
'xanchor': 'center',
'yanchor': 'top' },
xaxis_title="Time (offset between powerlog & loadgen timestamps)",
xaxis_tickformat='%H:%M:%S.%L',
yaxis_title="Amps (A)" )
m_figure_volts.show()
m_figure_watts.show()
m_figure_amps.show()
# Parse Loadgen log files
# Specify directory and search for "*detail.txt" & "*summary.txt"
def f_parseLoadgen( p_dirin, p_fileout ):
m_workname = [ "resnet50", "resnet",
"mobilnet",
"gnmt",
"ssdmobilenet", "ssd-small",
"ssdresnet34", "ssd-large" ]
m_metric = { "offline" : "Samples per second",
"multistream" : "Samples per query",
"singlestream" : "90th percentile latency (ns)",
"server" : "Schehduled Samples per second" }
m_scenario = ""
m_testname = ""
m_testmode = ""
m_loadgen_ts = 0
m_power_ts = ""
m_power_state = ""
m_score_value = 0
m_score_valid = ""
m_counter = 0
m_storage = []
m_storage.append( ["Workload", "Scenario", "Mode", "State", "Loadgen TS", "System Date", "System Time", "Result", "Score", "Metric"] )
# Assumes both *detail.txt and *summary.txt files exists
for m_dirname, m_subdirs, m_filelist in os.walk( p_dirin ):
for m_filename in m_filelist:
if m_filename.endswith( 'detail.txt' ):
m_counter = m_counter + 1
m_fullpath = os.path.join(m_dirname, m_filename)
for m_re in m_workname:
if( re.search( m_re, m_fullpath, re.I ) ):
m_testname = m_re
for m_re in m_metric.keys():
if( re.search( m_re, m_fullpath, re.I ) ):
m_scenario = m_re
try:
m_file = open( m_fullpath, 'r' )
except:
print( "error opening file:", m_fullpath )
exit(1)
for m_line in m_file:
# Date format is YYYY-MM-DD HH:MM:SS
if( re.search('time of test', m_line) ):
m_testmode = ""
m_power_state = "INIT"
# m_power_ts = (re.search("(\d*)-(\d*-\d\d).*(\d\d:\d*:\d*)Z$", m_line)).groups()
# m_power_ts = m_power_ts[1] + "-" + m_power_ts[0] + " " + m_power_ts[2] + ".000"
m_power_ts = (re.search("(\d*-\d*-\d\d).*(\d\d:\d*:\d*)Z$", m_line)).groups()
m_power_ts = m_power_ts[0] + " " + m_power_ts[1] + ".000"
elif( re.search( 'Starting ' + m_testmode + ' mode', m_line) ):
m_testmode = "START"
m_power_state = ""
m_power_ts = ""
# Date format is MM-DD-YYYY HH:MM:SSS.mmm
elif( re.search( "POWER_", m_line) ):
m_power_state = (re.search( "POWER_(\w*)", m_line)).group(1)
m_power_ts = (re.search('(\d*-\d*)-(\d*)( \d*:\d*:\d*\.\d*)$', m_line)).groups()
m_power_ts = m_power_ts[1] + "-" + m_power_ts[0] + m_power_ts[2]
elif( re.search('pid', m_line) and re.search('Scenario', m_line) ):
m_scenario = (re.search( '(\w*\s?\w*)$', m_line )).group(1)
m_scenario = (m_scenario.replace( " ", "" )).lower()
continue
elif( re.search('Test mode', m_line) ): # and re.search('accuracy', m_line, re.I) ):
m_testmode = (re.search( "Test mode : (\w*)", m_line)).group(1)
continue
else:
continue
m_loadgen_ts = (re.search( '(\d*)ns', m_line)).group(1)
(m_power_ts_date, m_power_ts_time) = m_power_ts.split()
m_storage.append( [m_testname, m_scenario, m_testmode, m_power_state, m_loadgen_ts, m_power_ts_date, m_power_ts_time] )
# Most parameters should be already filled (e.g. testname, scenario, mode)
elif m_filename.endswith( 'summary.txt' ):
m_fullpath = os.path.join(m_dirname, m_filename)
m_score_valid = ""
m_score_value = ""
try:
m_file = open( m_fullpath, 'r' )
except:
print( "error opening file:", m_fullpath )
exit(1)
m_power_state = "DONE"
for m_line in m_file:
if( re.search( "Result is", m_line) ):
m_score_valid = (re.search('Result is : (.*)$', m_line)).group(1)
elif( re.search( re.escape(m_metric[m_scenario.lower()]), m_line) ):
m_score_value = (re.search( "(\d*\.?\d*)$", m_line, re.I)).group(1)
# else:
# nothing
continue
m_storage.append( [m_testname, m_scenario, m_testmode, m_power_state, "", "", "", m_score_valid, m_score_value, m_metric[m_scenario.lower()]] )
print( "{} loadgen log files found and parsed".format(m_counter) )
print( "storing CSV data into:", p_fileout )
try:
with open( p_fileout, 'w', newline='') as m_file:
m_csvWriter = csv.writer( m_file, delimiter=',' )
for m_entry in m_storage:
m_csvWriter.writerow( m_entry )
m_file.close()
except:
print( "error while creating loadgen log csv output file:", p_fileout )
exit(1)
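# --- Illustrative sketch (not part of the original parser) ---
# The POWER_BEGIN/POWER_END lines in the loadgen detail log carry an
# MM-DD-YYYY timestamp; the regexes below (the same ones used in
# f_parseLoadgen) extract the state and reorder the timestamp to
# "YYYY-MM-DD HH:MM:SS.mmm". The sample line is made up and shorter than a
# real detail.txt line.
def _power_ts_regex_example():
    m_line = "POWER_BEGIN 01-13-2020 10:00:00.123"
    m_state = (re.search("POWER_(\w*)", m_line)).group(1)
    m_ts = (re.search('(\d*-\d*)-(\d*)( \d*:\d*:\d*\.\d*)$', m_line)).groups()
    return m_state, m_ts[1] + "-" + m_ts[0] + m_ts[2]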
# Parse PTDaemon Power Log Filename
# Format should be:
# Time,MM-DD-YYYY HH:MM:SS.SSS,Watts,DD.DDDDDD,Volts,DDD,DDDDDD,Amps,D.DDDDDD,PF,D.DDDDDD,Mark,String
def f_parsePowerlog( p_filein, p_fileout ):
m_counter = 0
m_storage = []
try:
m_file = open( p_filein, 'r' )
print( "opening power log file:", p_filein )
except:
print( "error opening power log file:", p_filein )
exit(1)
# Create headers
# Relabel & split date & time for better parsing
m_line = m_file.readline()
m_line = m_line.replace( "Time", "Date", 1 )
m_line = m_line.replace( " ", ",Time,", 1)
m_storage.append( m_line.split(',')[::2] )
# Store data
for m_line in m_file :
m_counter = m_counter + 1
m_line = m_line.strip()
m_line = m_line.replace( "Time", "Date", 1 )
m_line = m_line.replace( " ", ",Time,", 1)
m_line = m_line.split(',')[1::2]
# need to re-order date to iso format
m_line[0] = m_line[0][-4:] + m_line[0][-5:-4] + m_line[0][:5]
m_storage.append( m_line )
m_file.close()
print( "done parsing PTDaemon power log. {} entries processed".format(m_counter) )
print( "storing CSV data into:", p_fileout )
try:
with open( p_fileout, 'w', newline='') as m_file:
m_csvWriter = csv.writer( m_file, delimiter=',' )
for m_entry in m_storage:
m_csvWriter.writerow( m_entry )
m_file.close()
except:
print( "error while creating PTDaemon power log csv output file:", p_fileout )
exit(1)
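# --- Illustrative sketch (not part of the original parser) ---
# One made-up PTDaemon line and the per-row transformation applied in
# f_parsePowerlog: relabel "Time" to "Date,Time", keep every second field,
# then reorder the date from MM-DD-YYYY to ISO YYYY-MM-DD. The sample line
# omits the trailing PF/Mark fields of a real log for brevity.
def _power_line_example():
    m_line = "Time,01-13-2020 10:00:00.123,Watts,42.5,Volts,230.1,Amps,0.2"
    m_line = m_line.replace("Time", "Date", 1).replace(" ", ",Time,", 1)
    m_line = m_line.split(',')[1::2]
    m_line[0] = m_line[0][-4:] + m_line[0][-5:-4] + m_line[0][:5]
    return m_line  # ['2020-01-13', '10:00:00.123', '42.5', '230.1', '0.2']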
def f_parseParameters():
m_argparser = argparse.ArgumentParser()
# Filename options
# Input
m_argparser.add_argument( "-lgi", "--loadgen_in", help="Specify directory of loadgen log files",
default="" )
m_argparser.add_argument( "-pli", "--power_in", help="Specify PTDaemon power log file",
default="" )
# Output
m_argparser.add_argument( "-lgo", "--loadgen_out", help="Specify loadgen CSV output file",
default="loadgen_out.csv" )
m_argparser.add_argument( "-plo", "--power_out", help="Specify power CSV output file",
default="power_out.csv" )
# Function options
m_argparser.add_argument( "-g", "--graph", help="Draw/output graph of power over time (default input: output loadgen and power CSVs)",
action="store_true")
m_argparser.add_argument( "-s", "--stats", help="Calculates power stats based on timestamps (both power and loadgen logs required)",
action="store_true")
m_args = m_argparser.parse_args()
if( m_args.power_in == m_args.power_out ):
print( "Power log output file cannot be the same as power log input file!")
exit(1)
return m_args
if __name__ == '__main__':
main()
| apache-2.0 |
CGATOxford/CGATPipelines | obsolete/reports/pipeline_capseq/trackers/macs_genomic_locations.py | 1 | 1229 | import os
import sys
import re
import types
import itertools
import matplotlib.pyplot as plt
import numpy
import numpy.ma
import Stats
import Histogram
import cpgReport
from CGATReport.Tracker import *
from CGATReport.odict import OrderedDict as odict
##########################################################################
class genomicFeatures(cpgReport.cpgTracker):
"""return overlap of interval with genomic features """
mPattern = "_merged_genomic_features$"
def __call__(self, track, slice=None):
data = self.getAll( """SELECT feature_class, count(distinct gene_id) as intervals FROM (
SELECT gene_id,
CASE WHEN tss_extended_pover1 > 0 THEN 'TSS'
WHEN genes_pover1 > 0 THEN 'Gene'
WHEN upstream_flank_pover1 >0 THEN 'Upstream'
WHEN downstream_flank_pover1 > 0 THEN 'Downstream'
ELSE 'Intergenic'
END AS feature_class
FROM %(track)s_merged_genomic_features)
group by feature_class""" % locals() )
return data
| mit |
crawfordsm/pysalt | slottools/slotview.py | 1 | 5808 | ################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
"""SlotView is the interactive tool for analyzing lightcurves produced by
slotphot for SALT SALTICAM slotmode data.
Updates:
20110516
* Updated the code to use PyQt4 for the GUI backend
* Updated the code to handle the new error handling
"""
# Ensure python 2.5 compatibility
from __future__ import with_statement
import time, math
import numpy as np
import scipy as sp
from pyraf import iraf
from pyraf.iraf import pysalt
import saltprint, salttime
import slottool as st
import Tkinter as Tk
from matplotlib.widgets import Cursor, SpanSelector, Slider, CheckButtons
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
# Gui library imports
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg
# Salt imports
from saltgui import ImageDisplay, MplCanvas
from salterror import SaltIOError
import saltsafeio as saltio
import saltsafekey as saltkey
from saltsafelog import logging
from SlotViewWindow import SlotViewWindow
debug=True
def slotview(newfits, indata, fileout, srcfile, fps=10.0, phottype='square', sigdet=5, contpix=10, \
driftlimit=10, clobber=True,logfile='slotview.log',verbose=True):
#set up the variables
status = 0
entries = []
vig_lo = {}
vig_hi = {}
hour = 0
min = 0
sec = 0.
time0 = 0.
nframes = 0
sleep=0
with logging(logfile,debug) as log:
#enter in the input data
saltio.fileexists(newfits)
#set the sleep parameter
if fps>0: sleep=1.0/(fps)
# read in the data file
id, time, ratio, rerr, tx, ty, tflux, terr, cx, cy, cflux, cerr=st.readlcfile(indata)
# read extraction region definition file
amp, x, y, x_o, y_o, r, br1, br2=st.readsrcfile(srcfile)
#determine the size of the data arrays
struct = saltio.openfits(newfits)
naxis1 = saltkey.get('NAXIS1',struct[1])
naxis2 = saltkey.get('NAXIS2',struct[1])
# Plot all of the data and the first image
# Create GUI
App = QtGui.QApplication([])
aw=SlotViewWindow(struct, id, tflux, cflux, ratio, time, phottype, sleep, \
tx, ty, cx, cy, r, br1, br2, naxis1, naxis2, sigdet, contpix, driftlimit)
aw.show()
# Start application event loop
app_exit=App.exec_()
# Check if GUI was executed successfully
if app_exit!=0:
raise SaltIOError('Slotview GUI has unexpected exit status ' + str(app_exit))
ratio, tflux, cflux, gframe, newphot=aw.ratio, aw.tflux, aw.cflux, aw.goodframes, aw.newphot
#close the input file
saltio.closefits(struct)
# Update the indata file if necessary
lc=saltio.openascii(fileout,'w')
for i in range(len(ratio)):
x['target']=tx[i]
x['comparison']=cx[i]
y['target']=ty[i]
y['comparison']=cy[i]
reltime=False
if gframe[i]:
st.writedataout(lc, id[i], time[i], x, y, tflux[i], terr[i], \
cflux[i], cerr[i], ratio[i], rerr[i], time[0], reltime)
saltio.closeascii(lc)
# -----------------------------------------------------------
# main code
parfile = iraf.osfn("slottools$slotview.par")
t = iraf.IrafTaskFactory(taskname="slotview",value=parfile,function=slotview, pkgname='slottools')
| bsd-3-clause |
zihua/scikit-learn | sklearn/discriminant_analysis.py | 13 | 28628 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
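# --- Illustrative sketch (not part of the public API) ---
# The three shrinkage modes accepted by _cov, shown on random data. Each call
# returns an (n_features, n_features) covariance estimate. The helper name is
# hypothetical and only serves as a usage example.
def _cov_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 5)
    return (_cov(X, None),    # plain empirical covariance
            _cov(X, 'auto'),  # Ledoit-Wolf shrinkage on standardized X
            _cov(X, 0.1))     # fixed shrinkage towards the diagonal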
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like, shape (n_features,)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
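# --- Illustrative sketch (not part of the public API) ---
# _class_means and _class_cov on a toy two-class problem: per-class means and
# the prior-weighted, shared covariance estimate that the LDA solvers consume.
# The helper name is hypothetical.
def _class_stats_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.array([0] * 10 + [1] * 10)
    means = _class_means(X, y)  # shape (2, 3)
    cov = _class_cov(X, y, priors=np.array([0.5, 0.5]), shrinkage=None)
    return means, cov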
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
.. versionchanged:: 0.17
Deprecated :class:`lda.LDA` have been moved to :class:`LinearDiscriminantAnalysis`.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
.. versionadded:: 0.17
tol : float, optional
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : array, shape (n_features,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = (S**2 / np.sum(
S**2))[:self.n_components]
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=None, tol=None):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.17
Deprecated *store_covariance* have been moved to main constructor.
.. versionchanged:: 0.17
Deprecated *tol* have been moved to main constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("The parameter 'store_covariance' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariance = store_covariance
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
.. versionchanged:: 0.17
Deprecated :class:`qda.QDA` have been moved to :class:`QuadraticDiscriminantAnalysis`.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
.. versionadded:: 0.17
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
.. versionadded:: 0.17
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y, store_covariances=None, tol=None):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.17
Deprecated *store_covariance* have been moved to main constructor.
.. versionchanged:: 0.17
Deprecated *tol* have been moved to main constructor.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
if store_covariances:
warnings.warn("The parameter 'store_covariances' is deprecated as "
"of version 0.17 and will be removed in 0.19. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
self.store_covariances = store_covariances
if tol:
warnings.warn("The parameter 'tol' is deprecated as of version "
"0.17 and will be removed in 0.19. The parameter is "
"no longer necessary because the value is set via "
"the estimator initialisation or set_params method.",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
AntonioJBT/CGATPipeline_core | CGATPipelines/pipeline_testing.py | 1 | 19055 | """=================================================
pipeline_testing - automated testing of pipelines
=================================================
This pipeline executes other pipelines for testing purposes.
Overview
========
This pipeline implements automated testing of CGAT pipelines. The
pipeline downloads test data from a specified URL, runs the associated
pipeline for each data set and compares the output with a reference.
The results are collected in a report.
Tests are setup in the pipeline configuration file.
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use CGAT pipelines.
In order to run all tests, simply enter an empty directory and type::
python <srcdir>/pipeline_testing.py config
Edit the config files as required and then type::
python <srcdir>/pipeline_testing.py make full
python <srcdir>/pipeline_testing.py make build_report
The first command will download the data and run the pipelines while
the second will build a summary report.
Configuration
-------------
The pipeline requires a configured :file:`pipeline.ini` file.
Tests are described as section in the configuration file. A test
section starts with the prefix ``test_``. For following example is a
complete test setup::
[test_mytest1]
# pipeline to run
pipeline=pipeline_mapping
# pipeline target to run (default is 'full')
# multiple targets can be specified as a comma separated list.
target=full
# filename suffixes to checksum
regex_md5=gtf.gz,bed.gz,tsv.gz,bam,nreads
# regular expression of files to be excluded from
# test for difference. Use | to separate multiple
# regular expressions.
regex_only_exist=rates.gff.gz
This configuration will run the test ``mytest1``. The associated
pipeline is :doc:`pipeline_mapping` and it will execute the target
``make full``. To check if the pipeline has completed successfully, it
will compare all files ending with any of the suffixes specified
(``gtf.gz``, ``bed.gz``, etc). The comparison will be done by building
a checksum of the whole file ignoring any comments (lines starting
with a ``#``).
Some files will be different at every run, for example if they use
some form of random initialization. Thus, the exact test can be
relaxed for groups of files. Files matching the regular expression in
``regex_linecount`` will be tested for existence and an identical
number of lines. Files matching the regular expressions in
``regex_exist`` will only be tested for existence.
The test expects a file called :file:`test_mytest1.tgz` with the
test data at the download URL (parameter ``data_url``).
To define a default test for a pipeline, simply name the
test ``test_<pipeline name>``, for example::
[test_mapping]
regex_md5=gtf.gz,bed.gz,tsv.gz,bam,nreads
Note that setting the ``target`` and ``pipeline`` options is
not necessary in this case as the default values suffice.
Input data
----------
The input data for each test resides in a compressed tar-ball. The input
data should uncompress into a directory called :file:`<testname>.dir`.
The tar-ball must also contain a file :file:`<testname>.ref`
containing the md5 checksums of files of a previous run of the test
that is being used as a reference.
The input data should contain all the data that is required for
running a test within a directory. It is best to minimize dependencies
between tests, though there is a mechanism for this (see below).
For example, the contents of a tar-ball will look like this::
test_mytest1.dir/ # test data root
test_mytest1.dir/Brain-F2-R1.fastq.gz # test data
test_mytest1.dir/Brain-F1-R1.fastq.gz
test_mytest1.dir/hg19.fasta # genomic data
test_mytest1.dir/hg19.idx
test_mytest1.dir/hg19.fa
test_mytest1.dir/hg19.fa.fai
test_mytest1.dir/pipeline.ini # pipeline configuration file
test_mytest1.dir/indices/ # configured to work in test dir
test_mytest1.dir/indices/bwa/ # bwa indices
test_mytest1.dir/indices/bwa/hg19.bwt
test_mytest1.dir/indices/bwa/hg19.ann
test_mytest1.dir/indices/bwa/hg19.pac
test_mytest1.dir/indices/bwa/hg19.sa
test_mytest1.dir/indices/bwa/hg19.amb
test_mytest1.ref # reference file
The reference file looks like this::
test_mytest1.dir/bwa.dir/Brain-F2-R2.bwa.bam 0e1c4ee88f0249c21e16d93ac496eddf
test_mytest1.dir/bwa.dir/Brain-F1-R2.bwa.bam 01bee8af5bbb5b1d13ed82ef1bc3620d
test_mytest1.dir/bwa.dir/Brain-F2-R1.bwa.bam 80902c87519b6865a9ca982787280972
test_mytest1.dir/bwa.dir/Brain-F1-R1.bwa.bam 503c99ab7042a839e56147fb1a221f27
...
This file is created by the test pipeline and called
:file:`test_mytest1.md5`. When setting up a test, start with an empty
file and later add this file to the test data.
Pipeline dependencies
---------------------
Some pipelines depend on the output of other pipelines, most notably
:doc:`pipeline_annotations`. To run a set of pipelines before other
pipelines, name them in the option ``prerequisites``, for example::
prerequisites=prereq_annotations
Pipeline output
===============
The major output is in the database file :file:`csvdb`.
Code
====
"""
from ruffus import files, transform, suffix, follows, merge, collate, regex, mkdir
import sys
import pipes
import os
import re
import glob
import tarfile
import pandas
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
# load options from the config file
import CGATPipelines.Pipeline as P
P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini"])
PARAMS = P.PARAMS
# obtain prerequisite generic data
@files([(None, "%s.tgz" % x)
for x in P.asList(PARAMS.get("prerequisites", ""))])
def setupPrerequisites(infile, outfile):
'''setup pre-requisites.
These are tar-balls that are unpacked, but not run.
'''
to_cluster = False
track = P.snip(outfile, ".tgz")
# obtain data - should overwrite pipeline.ini file
statement = '''
wget --no-check-certificate -O %(track)s.tgz %(data_url)s/%(track)s.tgz'''
P.run()
tf = tarfile.open(outfile)
tf.extractall()
@files([(None, "%s.tgz" % x)
for x in P.CONFIG.sections()
if x.startswith("test")])
def setupTests(infile, outfile):
'''setup tests.
This method creates a directory in which a test will be run
and downloads test data with configuration files.
'''
to_cluster = False
track = P.snip(outfile, ".tgz")
if os.path.exists(track + ".dir"):
raise OSError('directory %s.dir already exists, please delete' % track)
# create directory
os.mkdir(track + ".dir")
# run pipeline config
pipeline_name = PARAMS.get(
"%s_pipeline" % track,
"pipeline_" + track[len("test_"):])
statement = '''
(cd %(track)s.dir;
python %(pipelinedir)s/%(pipeline_name)s.py
%(pipeline_options)s config) >& %(outfile)s.log
'''
P.run()
# obtain data - should overwrite pipeline.ini file
statement = '''
wget --no-check-certificate -O %(track)s.tgz %(data_url)s/%(track)s.tgz'''
P.run()
tf = tarfile.open(outfile)
tf.extractall()
if not os.path.exists("%s.dir" % track):
raise ValueError(
"test package did not create directory '%s.dir'" % track)
def runTest(infile, outfile):
'''run a test.
Multiple targets are run iteratively.
'''
track = P.snip(outfile, ".log")
pipeline_name = PARAMS.get(
"%s_pipeline" % track,
"pipeline_" + track[len("test_"):])
pipeline_targets = P.asList(
PARAMS.get("%s_target" % track,
"full"))
# do not run on cluster, mirror
# that a pipeline is started from
# the head node
to_cluster = False
template_statement = '''
(cd %%(track)s.dir;
python %%(pipelinedir)s/%%(pipeline_name)s.py
%%(pipeline_options)s make %s) >& %%(outfile)s
'''
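# NOTE: the single '%s' placeholder above is filled with the target name
# just below; the doubled '%%(...)s' placeholders survive that substitution
# as '%(...)s' and are interpolated from the local variables and PARAMS
# when the statement is executed via P.run().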
if len(pipeline_targets) == 1:
statement = template_statement % pipeline_targets[0]
P.run(ignore_errors=True)
else:
statements = []
for pipeline_target in pipeline_targets:
statements.append(template_statement % pipeline_target)
P.run(ignore_errors=True)
# @follows(setupTests)
# @files([("%s.tgz" % x, "%s.log" % x)
# for x in P.asList(PARAMS.get("prerequisites", ""))])
# def runPreparationTests(infile, outfile):
# '''run pre-requisite pipelines.'''
# runTest(infile, outfile)
@follows(setupTests, setupPrerequisites)
@files([("%s.tgz" % x, "%s.log" % x)
for x in P.CONFIG.sections()
if x.startswith("test") and
x not in P.asList(PARAMS.get("prerequisites", ""))])
def runTests(infile, outfile):
'''run a pipeline with test data.'''
runTest(infile, outfile)
@transform(runTests,
suffix(".log"),
".report")
def runReports(infile, outfile):
'''run a pipeline report.'''
track = P.snip(outfile, ".report")
pipeline_name = PARAMS.get(
"%s_pipeline" % track,
"pipeline_" + track[len("test_"):])
statement = '''
(cd %(track)s.dir; python %(pipelinedir)s/%(pipeline_name)s.py
%(pipeline_options)s make build_report) >& %(outfile)s
'''
P.run(ignore_errors=True)
def compute_file_metrics(infile, outfile, metric, suffixes):
"""apply a tool to compute metrics on a list of files matching
regex_pattern."""
if suffixes is None or len(suffixes) == 0:
E.info("No metrics computed for {}".format(outfile))
IOTools.touchFile(outfile)
return
track = P.snip(infile, ".log")
# convert regex patterns to a suffix match:
# prepend a .*
# append a $
regex_pattern = " -or ".join(["-regex .*{}$".format(pipes.quote(x))
for x in suffixes])
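# e.g. for suffixes ['bam', 'tsv.gz'] this yields:
# -regex .*bam$ -or -regex .*tsv.gz$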
E.debug("applying metric {} to files matching {}".format(metric,
regex_pattern))
if metric == "file":
statement = '''find %(track)s.dir
-type f
-not -regex '.*\/report.*'
-not -regex '.*\/_.*'
\( %(regex_pattern)s \)
| sort -k1,1
> %(outfile)s'''
else:
statement = '''find %(track)s.dir
-type f
-not -regex '.*\/report.*'
-not -regex '.*\/_.*'
\( %(regex_pattern)s \)
-exec %(pipeline_scriptsdir)s/cgat_file_apply.sh {} %(metric)s \;
| perl -p -e "s/ +/\\t/g"
| sort -k1,1
> %(outfile)s'''
P.run()
@follows(runReports)
@transform(runTests,
suffix(".log"),
".md5")
def buildCheckSums(infile, outfile):
'''build checksums for files in the build directory.
Files are uncompressed before computing the checksum
as gzip stores meta information such as the time stamp.
'''
track = P.snip(infile, ".log")
compute_file_metrics(
infile,
outfile,
metric="md5sum",
suffixes=P.asList(P.asList(PARAMS.get('%s_regex_md5' % track, ""))))
@transform(runTests,
suffix(".log"),
".lines")
def buildLineCounts(infile, outfile):
'''compute line counts.
Files are uncompressed before computing the number of lines.
'''
track = P.snip(infile, ".log")
compute_file_metrics(
infile,
outfile,
metric="wc -l",
suffixes=P.asList(P.asList(PARAMS.get('%s_regex_linecount' % track, ""))))
@transform(runTests,
suffix(".log"),
".exist")
def checkFileExistence(infile, outfile):
'''check whether files exist.
Only the presence of a file is tested; its content is ignored.
'''
track = P.snip(infile, ".log")
compute_file_metrics(
infile,
outfile,
metric="file",
suffixes=P.asList(P.asList(PARAMS.get('%s_regex_exist' % track, ""))))
@collate((buildCheckSums, buildLineCounts, checkFileExistence),
regex("([^.]*).(.*)"),
r"\1.stats")
def mergeFileStatistics(infiles, outfile):
'''merge all file statistics.'''
to_cluster = False
infiles = " ".join(sorted(infiles))
statement = '''
%(pipeline_scriptsdir)s/merge_testing_output.sh
%(infiles)s
> %(outfile)s'''
P.run()
@merge(mergeFileStatistics,
"md5_compare.tsv")
def compareCheckSums(infiles, outfile):
'''compare checksum files against existing reference data.
'''
to_cluster = False
outf = IOTools.openFile(outfile, "w")
outf.write("\t".join((
("track", "status",
"job_finished",
"nfiles", "nref",
"missing", "extra",
"different",
"different_md5",
"different_lines",
"same",
"same_md5",
"same_lines",
"same_exist",
"files_missing",
"files_extra",
"files_different_md5",
"files_different_lines"))) + "\n")
for infile in infiles:
E.info("working on {}".format(infile))
track = P.snip(infile, ".stats")
logfiles = glob.glob(track + "*.log")
job_finished = True
for logfile in logfiles:
is_complete = IOTools.isComplete(logfile)
E.debug("logcheck: {} = {}".format(logfile, is_complete))
job_finished = job_finished and is_complete
reffile = track + ".ref"
# regular expression of files to test only for existence
regex_exist = PARAMS.get('%s_regex_exist' % track, None)
if regex_exist:
regex_exist = re.compile("|".join(P.asList(regex_exist)))
regex_linecount = PARAMS.get('%s_regex_linecount' % track, None)
if regex_linecount:
regex_linecount = re.compile("|".join(P.asList(regex_linecount)))
regex_md5 = PARAMS.get('%s_regex_md5' % track, None)
if regex_md5:
regex_md5 = re.compile("|".join(P.asList(regex_md5)))
if not os.path.exists(reffile):
raise ValueError('no reference data defined for %s' % track)
cmp_data = pandas.read_csv(IOTools.openFile(infile),
sep="\t",
index_col=0)
ref_data = pandas.read_csv(IOTools.openFile(reffile),
sep="\t",
index_col=0)
shared_files = set(cmp_data.index).intersection(ref_data.index)
missing = set(ref_data.index).difference(cmp_data.index)
extra = set(cmp_data.index).difference(ref_data.index)
different = set(shared_files)
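# 'different' starts out as all shared files; files covered by the relaxed
# existence and line-count checks below are removed from it, so the strict
# md5 comparison only applies to the remaining files matching regex_md5.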
# remove those for which only check for existence
if regex_exist:
same_exist = set([x for x in different
if regex_exist.search(x)])
different = set([x for x in different
if not regex_exist.search(x)])
else:
same_exist = set()
# select those for which only check for number of lines
if regex_linecount:
check_lines = [x for x in different
if regex_linecount.search(x)]
dd = (cmp_data['nlines'][check_lines] !=
ref_data['nlines'][check_lines])
different_lines = set(dd.index[dd])
different = different.difference(check_lines)
dd = (cmp_data['nlines'][check_lines] ==
ref_data['nlines'][check_lines])
same_lines = set(dd.index[dd])
else:
different_lines = set()
same_lines = set()
# remainder - check md5
if regex_md5:
check_md5 = [x for x in different
if regex_md5.search(x)]
dd = (cmp_data['md5'][check_md5] !=
ref_data['md5'][check_md5])
different_md5 = set(dd.index[dd])
dd = (cmp_data['md5'][check_md5] ==
ref_data['md5'][check_md5])
same_md5 = set(dd.index[dd])
else:
different_md5 = set()
same_md5 = set()
if job_finished and (len(missing) + len(extra) + \
len(different_md5) + len(different_lines) == 0):
status = "OK"
else:
status = "FAIL"
outf.write("\t".join(map(str, (
track,
status,
job_finished,
len(cmp_data),
len(ref_data),
len(missing),
len(extra),
len(different_md5) + len(different_lines),
len(different_md5),
len(different_lines),
len(same_md5) + len(same_lines) + len(same_exist),
len(same_md5),
len(same_lines),
len(same_exist),
",".join(missing),
",".join(extra),
",".join(different_md5),
",".join(different_lines),
))) + "\n")
outf.close()
@transform(compareCheckSums,
suffix(".tsv"),
".load")
def loadComparison(infile, outfile):
'''load comparison data into database.'''
P.load(infile, outfile)
@transform(mergeFileStatistics,
suffix(".stats"),
"_results.load")
def loadResults(infile, outfile):
'''load comparison data into database.'''
P.load(infile, outfile, options="--add-index=file")
@transform(mergeFileStatistics,
suffix(".ref"),
"_reference.load")
def loadReference(infile, outfile):
'''load comparison data into database.'''
P.load(infile, outfile, options="--add-index=file")
@follows(runTests, runReports)
def run_components():
pass
@follows(run_components, loadComparison, loadResults, loadReference)
def full():
pass
@files(None, 'reset.log')
def reset(infile, outfile):
'''remove all data in pipeline.'''
to_cluster = False
statement = '''
rm -rf prereq_* ctmp*;
rm -rf test_* _cache _static _templates _tmp report;
rm -f *.log csvdb *.load *.tsv'''
P.run()
###################################################################
###################################################################
###################################################################
# primary targets
###################################################################
@follows(mkdir("report"))
def build_report():
'''build report from scratch.'''
E.info("starting report build process from scratch")
P.run_report(clean=True)
@follows(mkdir("report"))
def update_report():
'''update report.'''
E.info("updating report")
P.run_report(clean=False)
@follows(update_report)
def publish_report():
'''publish report.'''
E.info("publishing report")
P.publish_report()
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
| mit |
irblsensitivity/irblsensitivity | scripts/analysis/ProjectPlot.py | 1 | 2545 | #-*- coding: utf-8 -*-
'''
Created on 2016. 11. 19
Updated on 2016. 01. 09
'''
from __future__ import print_function
import os
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from ExpBase import ExpBase
class ProjectPlot(ExpBase):
__name__ = u'Correlation'
OUTPUT = u''
def __init__(self, _datafile, _type, output):
super(ProjectPlot, self).__init__()
self.OUTPUT = output
titles, datas = self.load_results(_datafile, ['str'] * 2 + ['float'] * 6)
filename = os.path.join(self.OUTPUT, 'boxplot_%s.pdf' % (_type))
self.draw_boxplot(titles, datas, _type, filename)
print(u'created graph :: %s' % filename)
def draw_boxplot(self, _labels, _data, _type, _filename):
orders = self.get_order(_data)
for x in range(len(_labels)):
_labels[x] = _labels[x].strip()
data = []
for idx in range(len(_labels)):
item, lb = self.get_array(_data, idx, orders)
data.append(np.asarray(item))
# setting plot
flierprops = dict(marker='o', markerfacecolor='black', markersize=5, linestyle='none')
bp_dict = plt.boxplot(data, labels=_labels, flierprops=flierprops)
plt.title('The distribution of subject\'s %s for each technique' % _type)
for line in bp_dict['medians']:
x, y = line.get_xydata()[1] # get position data for median line
plt.text(x, y, '%.4f' % y, horizontalalignment='center') # draw above, centered
for line in bp_dict['whiskers']:
x, y = line.get_xydata()[0] # bottom line
plt.text(x, y, '%.4f' % y, horizontalalignment='center') # draw above, centered
x, y = line.get_xydata()[1] # top line
plt.text(x, y, '%.4f' % y, horizontalalignment='center') # draw above, centered
plt.savefig(_filename)
plt.clf() # Clear figure
#plt.close()
###############################################################################################################
###############################################################################################################
if __name__ == "__main__":
# args = getargs()
# if args is None:
# exit(0)
output = u'/var/experiments/BugLocalization/dist/analysis/01_ProjectPlot/'
if os.path.exists(output) is True:
import shutil
shutil.rmtree(output)
if os.path.exists(output) is False:
os.makedirs(output)
obj = ProjectPlot(u'/var/experiments/BugLocalization/dist/analysis/New_Multiple_MAP.txt', 'MAP', output)
obj = ProjectPlot(u'/var/experiments/BugLocalization/dist/analysis/New_Multiple_MRR.txt', 'MRR', output)
pass | apache-2.0 |
sugartom/tensorflow-alien | tensorflow/examples/learn/iris_custom_model.py | 50 | 2613 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Convert the target to a one-hot tensor of shape (length of features, 3)
# with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.9}
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
nmayorov/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
mwv/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
icdishb/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 75 | 34122 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v1 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# compare that the two vectorizer give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
# words that are highly frequent across the complete corpus are likely
# to be uninformative (either real stop words or extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params = '_invalid_analyzer_type_'
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
simoneriggi/ska | caesar/scripts/skymodel_generator.py | 1 | 50953 | #!/usr/bin/env python
##################################################
### MODULE IMPORT
##################################################
## STANDARD MODULES
import os
import sys
import subprocess
import string
import time
import signal
from threading import Thread
import datetime
import numpy as np
import random
import math
##from ctypes import *
## ASTRO
from scipy import ndimage
##import pyfits
from astropy.io import fits
from astropy.units import Quantity
from astropy.modeling.parameters import Parameter
from astropy.modeling.core import Fittable2DModel
from astropy.modeling.models import Box2D, Gaussian2D, Ring2D, Ellipse2D, TrapezoidDisk2D, Disk2D, AiryDisk2D, Sersic2D
from photutils.datasets import make_noise_image
from astropy import wcs
## ROOT
import ROOT
from ROOT import gSystem, TFile, TTree, gROOT, AddressOf
## CAESAR
gSystem.Load('libCaesar')
from ROOT import Caesar
## COMMAND-LINE ARG MODULES
import getopt
import argparse
import collections
## Graphics modules
import matplotlib.pyplot as plt
import pylab
##################################################
#### GET SCRIPT ARGS ####
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
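# NOTE (illustrative): str2bool can be used as an argparse type converter,
# e.g. parser.add_argument('--myflag', type=str2bool, default=False), so that
# command-line values such as 'yes'/'no' are parsed into booleans; the
# '--myflag' option is only an example and not defined by this script.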
def get_args():
"""This function parses and return arguments passed in"""
parser = argparse.ArgumentParser(description="Parse args.")
# - GENERAL IMAGE OPTIONS
parser.add_argument('-nx', '--nx', dest='nx', required=True, type=int, action='store',help='Image width in pixels')
parser.add_argument('-ny', '--ny', dest='ny', required=True, type=int, action='store',help='Image height in pixels')
parser.add_argument('-marginx', '--marginx', dest='marginx', required=False, type=int, default=0,action='store',help='Image x margin in pixels')
parser.add_argument('-marginy', '--marginy', dest='marginy', required=False, type=int, default=0,action='store',help='Image y margin in pixels')
parser.add_argument('-pixsize', '--pixsize', dest='pixsize', required=True, type=float, action='store',help='Map pixel size in arcsec')
parser.add_argument('-bmaj', '--bmaj', dest='bmaj', required=True, type=float, default=10, action='store',help='Beam bmaj in arcsec (default=5)')
parser.add_argument('-bmin', '--bmin', dest='bmin', required=True, type=float, default=5, action='store',help='Beam bmin in arcsec (default=5)')
parser.add_argument('-bpa', '--bpa', dest='bpa', required=False, type=float, default=0, action='store',help='Beam bpa in deg (default=0)')
parser.add_argument('-crpix1', '--crpix1', dest='crpix1', required=False, type=float, default=1, action='store',help='CRPIX1 fits keyword (default=1)')
parser.add_argument('-crpix2', '--crpix2', dest='crpix2', required=False, type=float, default=1, action='store',help='CRPIX2 fits keyword (default=1)')
parser.add_argument('-crval1', '--crval1', dest='crval1', required=False, type=float, default=254.851041667, action='store',help='CRVAL1 fits keyword (default=1)')
parser.add_argument('-crval2', '--crval2', dest='crval2', required=False, type=float, default=-41.4765888889, action='store',help='CRVAL2 fits keyword (default=1)')
#parser.add_argument('-ctype1', '--ctype1', dest='ctype1', required=False, type=str, default='RA---NCP', action='store',help='CTYPE1 fits keyword (default=1)')
#parser.add_argument('-ctype2', '--ctype2', dest='ctype2', required=False, type=str, default='DEC--NCP', action='store',help='CTYPE2 fits keyword (default=1)')
parser.add_argument('-ctype1', '--ctype1', dest='ctype1', required=False, type=str, default='RA---SIN', action='store',help='CTYPE1 fits keyword (default=1)')
parser.add_argument('-ctype2', '--ctype2', dest='ctype2', required=False, type=str, default='DEC--SIN', action='store',help='CTYPE2 fits keyword (default=1)')
# - BKG OPTIONS
parser.add_argument('--bkg', dest='enable_bkg', action='store_true')
parser.add_argument('--no-bkg', dest='enable_bkg', action='store_false')
parser.set_defaults(enable_bkg=True)
parser.add_argument('-bkg_level', '--bkg_level', dest='bkg_level', required=False, type=float, default=10e-6, action='store',help='Bkg level in Jy (default=10e-6)')
parser.add_argument('-bkg_rms', '--bkg_rms', dest='bkg_rms', required=False, type=float, default=100e-6, action='store',help='Bkg rms in Jy (default=100e-6)')
# - COMPACT SOURCE OPTIONS
parser.add_argument('-npixels_min', '--npixels_min', dest='npixels_min', required=False, type=int, default=5, action='store',help='Minimum number of pixels for a generated source (default=5)')
parser.add_argument('--compactsources', dest='enable_compactsources', action='store_true')
parser.add_argument('--no-compactsources', dest='enable_compactsources', action='store_false')
parser.set_defaults(enable_compactsources=True)
parser.add_argument('-zmin', '--zmin', dest='zmin', required=False, type=float, default=1, action='store',help='Minimum source significance level in sigmas above the bkg (default=1)')
parser.add_argument('-zmax', '--zmax', dest='zmax', required=False, type=float, default=30, action='store',help='Maximum source significance level in sigmas above the bkg (default=30)')
parser.add_argument('-source_density', '--source_density', dest='source_density', required=False, type=float, default=1000, action='store',help='Compact source density (default=1000)')
# - EXTENDED SOURCES
parser.add_argument('--extsources', dest='enable_extsources', action='store_true')
parser.add_argument('--no-extsources', dest='enable_extsources', action='store_false')
parser.set_defaults(enable_extsources=True)
parser.add_argument('-ext_source_density', '--ext_source_density', dest='ext_source_density', required=False, type=float, default=100, action='store',help='Extended source density (default=100)')
parser.add_argument('-zmin_ext', '--zmin_ext', dest='zmin_ext', required=False, type=float, default=0.1, action='store',help='Minimum extended source significance level in sigmas above the bkg (default=0.1)')
parser.add_argument('-zmax_ext', '--zmax_ext', dest='zmax_ext', required=False, type=float, default=2, action='store',help='Maximum extended source significance level in sigmas above the bkg (default=2)')
parser.add_argument('-ext_scale_min', '--ext_scale_min', dest='ext_scale_min', required=False, type=float, default=10, action='store',help='Minimum extended source size in arcsec (default=10)')
parser.add_argument('-ext_scale_max', '--ext_scale_max', dest='ext_scale_max', required=False, type=float, default=3600, action='store',help='Maximum extended source size in arcsec (default=3600)')
parser.add_argument('-ext_source_type', '--ext_source_type', dest='ext_source_type', required=False, type=int, default=-1, action='store',help='Extended source type to generate (-1=all available models, 1=ring sector, 2=ellipse, 3=bubble+shell, 4=Sersic, 5=Gaussian blob) (default=-1)')
# - SOURCE MODEL OPTIONS
parser.add_argument('-ring_rmin', '--ring_rmin', dest='ring_rmin', required=False, type=float, default=0.5, action='store',help='Minimum ring radius in arcsec (default=0.5)')
parser.add_argument('-ring_rmax', '--ring_rmax', dest='ring_rmax', required=False, type=float, default=10, action='store',help='Maximum ring radius in arcsec (default=10)')
parser.add_argument('-ring_wmin', '--ring_wmin', dest='ring_wmin', required=False, type=float, default=5, action='store',help='Minimum ring width in arcsec (default=5)')
parser.add_argument('-ring_wmax', '--ring_wmax', dest='ring_wmax', required=False, type=float, default=20, action='store',help='Maximum ring width in arcsec (default=20)')
parser.add_argument('-ellipse_rmin', '--ellipse_rmin', dest='ellipse_rmin', required=False, type=float, default=0.5, action='store',help='Minimum ellipse semi-axis in arcsec (default=0.5)')
parser.add_argument('-ellipse_rmax', '--ellipse_rmax', dest='ellipse_rmax', required=False, type=float, default=10, action='store',help='Maximum ellipse semi-axis in arcsec (default=10)')
parser.add_argument('-disk_shell_ampl_ratio_min', '--disk_shell_ampl_ratio_min', dest='disk_shell_ampl_ratio_min', required=False, type=float, default=0.1, action='store',help='Disk/shell amplitude ratio min (default=0.1)')
parser.add_argument('-disk_shell_ampl_ratio_max', '--disk_shell_ampl_ratio_max', dest='disk_shell_ampl_ratio_max', required=False, type=float, default=0.8, action='store',help='Disk/shell amplitude ratio max (default=0.8)')
parser.add_argument('-disk_shell_radius_ratio_min', '--disk_shell_radius_ratio_min', dest='disk_shell_radius_ratio_min', required=False, type=float, default=0.6, action='store',help='Disk/shell radius ratio min (default=0.6)')
parser.add_argument('-disk_shell_radius_ratio_max', '--disk_shell_radius_ratio_max', dest='disk_shell_radius_ratio_max', required=False, type=float, default=0.9, action='store',help='Disk/shell radius ratio max (default=0.9)')
parser.add_argument('-zmin_model', '--zmin_model', dest='model_trunc_zmin', required=False, type=float, default=1, action='store',help='Minimum source significance level in sigmas above the bkg below which source data are set to 0 (default=1)')
parser.add_argument('-mask_boxsize', '--mask_boxsize', dest='mask_boxsize', required=False, type=float, default=10, action='store',help='Mask box size in pixels (default=10)')
parser.add_argument('-trunc_thr', '--trunc_thr', dest='trunc_thr', required=False, type=float, default=0.01, action='store',help='Source model truncation thr (default=0.01)')
# - OUTPUT FILE OPTIONS
parser.add_argument('-outputfile', '--outputfile', dest='outputfile', required=False, type=str, default='simmap.fits',action='store',help='Output filename')
parser.add_argument('-outputfile_model', '--outputfile_model', dest='outputfile_model', required=False, type=str, default='skymodel.fits', action='store',help='Model filename')
parser.add_argument('-outputfile_sources', '--outputfile_sources', dest='outputfile_sources', required=False, type=str, default='sources.root',action='store',help='Skymodel source ROOT Output filename')
parser.add_argument('-outputfile_ds9region', '--outputfile_ds9region', dest='outputfile_ds9region', required=False, type=str, default='dsregion.reg',action='store',help='DS9 source region filename')
parser.add_argument('-outputfile_casaregion', '--outputfile_casaregion', dest='outputfile_casaregion', required=False, type=str, default='casa_mask.dat',action='store',help='CASA source region filename')
args = parser.parse_args()
return args
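# Example invocation (added for illustration; the values below are arbitrary):
#   python skymodel_generator.py -nx 512 -ny 512 -pixsize 1.0 -bmaj 6.5 -bmin 6.5 \
#     -bkg_level 10e-6 -bkg_rms 100e-6 -source_density 1000 --no-extsources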
###########################
## MODELS
###########################
class RingSector2D(Fittable2DModel):
""" Two dimensional radial symmetric Ring model """
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
r_in = Parameter(default=1)
width = Parameter(default=1)
theta_min = Parameter(default=-np.pi)
theta_max = Parameter(default=np.pi)
def __init__(self, amplitude=amplitude.default, x_0=x_0.default, y_0=y_0.default, r_in=r_in.default, width=width.default, theta_min=theta_min.default, theta_max=theta_max.default, **kwargs):
# Fall back to the parameter defaults when width or the theta limits are not given.
if width is None:
width = self.width.default
if theta_min is None:
theta_min = self.theta_min.default
if theta_max is None:
theta_max = self.theta_max.default
super(RingSector2D, self).__init__(amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width, theta_min=theta_min, theta_max=theta_max, **kwargs)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width, theta_min, theta_max):
"""Two dimensional Ring sector model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
theta = np.arctan2(x-x_0,y-y_0)
r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2)
theta_range= np.logical_and(theta>=theta_min, theta<=theta_max)
sector_range = np.logical_and(r_range,theta_range)
result = np.select([sector_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ( (self.y_0 - dr, self.y_0 + dr), (self.x_0 - dr, self.x_0 + dr) )
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit, 'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']), ('y_0', inputs_unit['x']), ('r_in', inputs_unit['x']), ('width', inputs_unit['x']), ('amplitude', outputs_unit['z'])])
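# --- Added illustrative usage (not part of the original script): evaluate the
# ring-sector model on a small pixel grid; grid size and parameters are arbitrary.
def demo_ring_sector():
""" Return a 64x64 array containing a half ring of unit amplitude """
yy, xx = np.mgrid[0:64, 0:64]
model = RingSector2D(amplitude=1.0, x_0=32., y_0=32., r_in=10., width=4., theta_min=-np.pi/2, theta_max=np.pi/2)
return model(xx, yy)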
###########################
## SIMULATOR CLASS
###########################
class SkyMapSimulator(object):
""" Sky map simulator class
Attributes:
nx: image width in pixels
ny: image height in pixels
pixsize: pixel size in arcsec (default=1)
"""
def __init__(self, nx, ny, pixsize=1):
""" Return a SkyMapGenerator object """
## Image parameters
self.nx = nx #in pixels
self.ny = ny # in pixels
self.marginx= 0 # in pixels (no margin)
self.marginy= 0 # in pixels (no margin)
self.pixsize= pixsize # in arcsec
self.gridy, self.gridx = np.mgrid[0:ny, 0:nx]
self.crpix1= 1
self.crpix2= 1
self.crval1= 254.851041667
self.crval2= -41.4765888889
self.ctype1= 'RA---SIN'
self.ctype2= 'DEC--SIN'
## Source model
self.truncate_models= True
self.trunc_thr= 0.01 # 1% flux truncation at maximum
self.trunc_model_zmin= 1
## Mask box size
self.mask_boxsize= 10 # in pixels
## Bkg parameters
self.simulate_bkg= True
self.bkg_level= 0 # in Jy
self.bkg_rms= 10.e-6 # in Jy
## Compact source parameters
self.simulate_compact_sources= True
self.source_density= 2000. # in sources/deg^2
self.beam_bmaj= 6.5 # in arcsec
self.beam_bmin= 6.5 # in arcsec
self.beam_bpa= 0 # in deg
self.beam_area= self.compute_beam_area(self.beam_bmaj,self.beam_bmin) # in pixels
self.zmin= 1 # in sigmas
self.zmax= 30 # in sigmas
self.npixels_min= 5
## Extended source parameters
self.simulate_extended_sources= True # same attribute read by generate_map() and set by enable_extended_sources()
self.ext_source_type= -1 # all source models generated
self.ext_source_density= 10 # in sources/deg^2
self.zmin_ext= 0.5 # in sigmas
self.zmax_ext= 5 # in sigmas
self.ring_rmin= 2. # in arcsec
self.ring_rmax= 10. # in arcsec
self.ring_width_min= 5 # in arcsec
self.ring_width_max= 10 # in arcsec
self.ellipse_rmin= 1 # in arcsec
self.ellipse_rmax= 10 # in arcsec
self.ellipse_rratiomin= 0.7 # ratio rmin/rmax
self.disk_rmin= 2 # in arcsec
self.disk_rmax= 10 # in arcsec
self.shell_disk_ampl_ratio_min= 0.1
self.shell_disk_ampl_ratio_max= 0.8
self.shell_disk_radius_ratio_min= 0.6
self.shell_disk_radius_ratio_max= 0.9
self.sersic_radius= 10 # in arcsec
self.sersic_ellipticity= 0.5
self.sersic_index= 4
## Map output file
self.mapfilename= 'simmap.fits'
self.modelfilename= 'skymodel.fits'
## DS9 output file
self.ds9filename= 'ds9region.reg'
## CASA region output file
self.casafilename= 'casamask.dat'
## Caesar img & sources
self.outfilename= 'SimOutput.root'
self.outfile= None
self.outtree= None
self.cs = None
self.caesar_sources= []
self.caesar_img= None
def init(self):
""" Initialize data """
## Initialize output tree & file
self.outfile= ROOT.TFile(self.outfilename,'RECREATE')
self.outtree= ROOT.TTree('SourceInfo','SourceInfo')
self.cs = Caesar.Source()
self.outtree.Branch('Source',self.cs)
def set_mask_box_size(self,boxsize):
""" Set mask box size """
if boxsize<=0:
raise ValueError('Invalid boxsize specified (shall be larger than 0)')
self.mask_boxsize= boxsize
def set_margins(self,marginx,marginy):
""" Set margin in X & Y """
if (marginx<0 or marginy<0 or marginx>=self.nx/2 or marginy>=self.ny/2) :
raise ValueError('Invalid margin specified (<0 or larger than image half size)!')
self.marginx= marginx
self.marginy= marginy
def set_ref_pix(self,x,y):
""" Set reference pixel (CRPIX1,CRPIX2) in FITS output """
self.crpix1= x
self.crpix2= y
def set_ref_pix_coords(self,x,y):
""" Set reference pixel coords (CRPIX1,CRPIX2) in FITS output """
self.crval1= x
self.crval2= y
def set_coord_system_type(self,x,y):
""" Set coord system type (CTYPE1,CTYPE2) in FITS output """
self.ctype1= x
self.ctype2= y
def enable_compact_sources(self,choice):
""" Enable/disable compact source generation """
self.simulate_compact_sources= choice
def enable_extended_sources(self,choice):
""" Enable/disable extended source generation """
self.simulate_extended_sources= choice
def enable_bkg(self,choice):
""" Enable/disable bkg generation """
self.simulate_bkg= choice
def set_npixels_min(self,value):
""" Set the minimum number of pixels for a generated source"""
self.npixels_min= value
def set_truncate_models(self,choice):
""" Enable/disable continuous model truncation (gaussian, airy disk, ...) """
# distinct name from the boolean attribute self.truncate_models, which would otherwise shadow this method
self.truncate_models= choice
def set_model_trunc_significance(self,value):
""" Set the significance level below which source model data are truncated """
self.trunc_model_zmin= value
def set_model_trunc_thr(self,value):
""" Set the flux percentage level for source model truncation """
self.trunc_thr= value
def set_ext_source_type(self,value):
""" Set the extended source type to be generated (-1=all, 1=ring, 2=ellipse, 3=bubble+shell, 4=airy)"""
self.ext_source_type= value
def set_ds9region_filename(self,filename):
""" Set the output DS9 region filename """
self.ds9filename= filename
def set_casaregion_filename(self,filename):
""" Set the output CASA region filename """
self.casafilename= filename
def set_map_filename(self,filename):
""" Set the output map filename """
self.mapfilename= filename
def set_model_filename(self,filename):
""" Set the output model filename """
self.modelfilename= filename
def set_source_filename(self,filename):
""" Set the output source ROOT filename """
self.outfilename= filename
def set_source_significance_range(self,zmin,zmax):
""" Set source significance range """
self.zmin= zmin
self.zmax= zmax
def set_ext_source_significance_range(self,zmin,zmax):
""" Set source significance range """
self.zmin_ext= zmin
self.zmax_ext= zmax
def set_source_density(self,density):
""" Set compact source density in deg^-2 """
self.source_density= density
def set_ext_source_density(self,density):
""" Set extended source density in deg^-2 """
self.ext_source_density= density
def set_ring_pars(self,rmin,rmax,wmin,wmax):
""" Set ring model parameters"""
self.ring_rmin= rmin
self.ring_rmax= rmax
self.ring_width_min= wmin
self.ring_width_max= wmax
def set_sersic_pars(self,radius,ell,index):
""" Set Sersic model pars"""
self.sersic_radius= radius
self.sersic_ellipticity= ell
self.sersic_index= index
def set_disk_pars(self,rmin,rmax):
""" Set disk model parameters"""
self.disk_rmin= rmin
self.disk_rmax= rmax
def set_disk_shell_pars(self,ampl_ratio_min,ampl_ratio_max,radius_ratio_min,radius_ratio_max):
""" Set disk shell model parameters"""
self.shell_disk_ampl_ratio_min= ampl_ratio_min
self.shell_disk_ampl_ratio_max= ampl_ratio_max
self.shell_disk_radius_ratio_min= radius_ratio_min
self.shell_disk_radius_ratio_max= radius_ratio_max
def set_ellipse_pars(self,rmin,rmax):
""" Set ring model parameters"""
self.ellipse_rmin= rmin
self.ellipse_rmax= rmax
def set_bkg_pars(self,bkg_level,bkg_rms):
""" Set bkg parameters """
self.bkg_level= bkg_level
self.bkg_rms= bkg_rms
def set_beam_info(self,Bmaj,Bmin,Bpa):
""" Set beam info """
self.beam_bmaj= Bmaj
self.beam_bmin= Bmin
self.beam_bpa= Bpa
self.beam_area= self.compute_beam_area(Bmaj,Bmin)
def compute_beam_area(self,Bmaj,Bmin):
""" Compute beam area """
A= np.pi*Bmaj*Bmin/(4*np.log(2)) #2d gaussian area with FWHM=fx,fy (in arcsec^2)
pixelArea= np.fabs(self.pixsize*self.pixsize) # in arcsec^2
beam_area= A/pixelArea # in pixels
return beam_area
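# Worked example (added; illustrative numbers): for the default circular 6.5"
# beam on 1" pixels, A = pi*6.5*6.5/(4*ln2) ~ 47.9 arcsec^2, i.e. ~47.9 pixels;
# this is the factor later used to scale the Jy/pixel map to Jy/beam.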
def compute_beam_sigma(self,fwhm):
""" """
sigma= fwhm/(2.*np.sqrt(2.*np.log(2.)))
return sigma
def generate_bkg(self):
""" Generate bkg data """
shape = (self.ny, self.nx)
bkg_data = make_noise_image(shape, type='gaussian', mean=self.bkg_level, stddev=self.bkg_rms)
return bkg_data
def generate_blob(self,ampl,x0,y0,sigmax,sigmay,theta,trunc_thr=0.01):
""" Generate a blob
Arguments:
ampl: peak flux in Jy
x0, y0: gaussian means in pixels
sigmax, sigmay: gaussian sigmas in pixels
theta: rotation in degrees
trunc_thr: truncation significance threshold
"""
#modelFcn= Gaussian2D(ampl,x0,y0,sigmax,sigmay,theta=math.radians(theta))
data= Gaussian2D(ampl,x0,y0,sigmax,sigmay,theta=math.radians(theta))(self.gridx, self.gridy)
totFlux= (float)(np.sum(data,axis=None))
print('totFlux=%s' % str(totFlux))
## Truncate data such that sum(data)_trunc/sum(data)<f
f= trunc_thr
if self.truncate_models:
data_vect_sorted= np.sort(np.ravel(data)) # sort ascending so the cumulative sum accumulates the faintest pixels first
data_csum= np.cumsum(data_vect_sorted)/totFlux
fluxThr= data_vect_sorted[np.argmin(data_csum<f)]
print('fluxThr=%s' % str(fluxThr))
data[data<fluxThr] = 0
## Truncate data at minimum significance
#ampl_min= (trunc_thr*self.bkg_rms) + self.bkg_level
#if self.truncate_models:
# data[data<ampl_min] = 0
return data
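# --- Added illustrative helper (not part of the original class): a minimal
# sketch showing that generate_blob keeps at least ~(1-trunc_thr) of the
# untruncated blob flux; the blob parameters below are arbitrary examples.
def check_blob_truncation(self,trunc_thr=0.01):
""" Return the flux fraction kept after truncation (illustration only) """
prev= self.truncate_models
self.truncate_models= False
full= self.generate_blob(1.0,self.nx/2.,self.ny/2.,3.,3.,0.,trunc_thr)
self.truncate_models= True
trunc= self.generate_blob(1.0,self.nx/2.,self.ny/2.,3.,3.,0.,trunc_thr)
self.truncate_models= prev
return np.sum(trunc)/np.sum(full) # expected to be >= ~(1-trunc_thr)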
def generate_ring(self,ampl,x0,y0,radius,width):
""" Generate a ring
Arguments:
ampl: peak flux in Jy
x0, y0: means in pixels
radius: ring radius in pixels
width: ring width in pixels
"""
data= Ring2D(ampl,x0,y0,radius,width)(self.gridx, self.gridy)
return data
def generate_ring_sector(self,ampl,x0,y0,radius,width,theta_min,theta_max):
""" Generate a ring
Arguments:
ampl: peak flux in Jy
x0, y0: means in pixels
radius: ring radius in pixels
width: ring width in pixels
theta_min, theta_max: sector theta min/max in degrees
"""
data= RingSector2D(ampl,x0,y0,radius,width,np.radians(theta_min),np.radians(theta_max))(self.gridx, self.gridy)
return data
def generate_bubble(self,ampl,x0,y0,radius,shell_ampl,shell_radius,shell_width,shell_theta_min,shell_theta_max):
""" Generate a bubble with a shell """
disk_data= Disk2D(ampl,x0,y0,radius)(self.gridx, self.gridy)
shell_data= self.generate_ring_sector(shell_ampl,x0,y0,shell_radius,shell_width,shell_theta_min,shell_theta_max)
data= disk_data + shell_data
return data
def generate_ellipse(self,ampl,x0,y0,a,b,theta):
""" Generate ellipse """
data= Ellipse2D(ampl,x0,y0,a,b,math.radians(theta))(self.gridx, self.gridy)
return data
def generate_airy_disk(self,ampl,x0,y0,radius,trunc_thr=0.01):
""" Generate Airy disk """
data= AiryDisk2D(amplitude=ampl,x_0=x0,y_0=y0,radius=radius)(self.gridx, self.gridy)
totFlux= (float)(np.sum(data,axis=None))
## Truncate data such that sum(data)_trunc/sum(data)<f
f= trunc_thr
if self.truncate_models:
data_vect_sorted= np.sort(np.ravel(data)) # sort ascending (see generate_blob)
data_csum= np.cumsum(data_vect_sorted)/totFlux
fluxThr= data_vect_sorted[np.argmin(data_csum<f)]
data[data<fluxThr] = 0
## Truncate data at minimum significance
#ampl_min= (self.zmin_ext*self.bkg_rms) + self.bkg_level
#if self.truncate_models:
# data[data<ampl_min] = 0
return data
def generate_sersic(self,ampl,x0,y0,radius,ell,index,theta,trunc_thr=0.01):
""" Generate Sersic model """
data= Sersic2D(amplitude=ampl,x_0=x0,y_0=y0,r_eff=radius,n=index,ellip=ell,theta=math.radians(theta))(self.gridx, self.gridy)
totFlux= (float)(np.sum(data,axis=None))
## Truncate data such that sum(data)_trunc/sum(data)<f
f= trunc_thr
if self.truncate_models:
data_vect_sorted= np.sort(np.ravel(data)) # sort ascending (see generate_blob)
data_csum= np.cumsum(data_vect_sorted)/totFlux
fluxThr= data_vect_sorted[np.argmin(data_csum<f)]
data[data<fluxThr] = 0
## Truncate data at minimum significance
#ampl_min= (self.zmin_ext*self.bkg_rms) + self.bkg_level
#if self.truncate_models:
# data[data<ampl_min] = 0
return data
def make_caesar_source(self,source_data,source_name,source_id,source_type,source_sim_type,ampl=None,x0=None,y0=None,source_max_scale=None):
""" Create Caesar source from source data array """
# Create Caesar source
source= Caesar.Source()
# Get source indexes and fill pixels in Caesar source
source_indexes= np.column_stack(np.where(source_data!=0))
nRows= (source_data.shape)[0]
nCols= (source_data.shape)[1]
for index in source_indexes:
rowId= index[0]
colId= index[1]
S= source_data[rowId,colId]
ix= colId
iy= rowId
#iy= nRows-1-rowId
gbin= ix + iy*nCols
pixel= Caesar.Pixel(gbin,ix,iy,ix,iy,S)
source.AddPixel(pixel)
# Is at edge
if (ix==0) or (ix==nCols-1) or (iy==0) or (iy==nRows-1):
source.SetEdgeFlag(True)
# Return None if npixels is too small
nPix= source.GetNPixels()
if nPix<self.npixels_min:
print ('INFO: Too few pixels (%s) for this source, return None!' % str(nPix))
return None
# If true info are not given compute them
# - S= count integral
# - baricenter of binary map
if x0 is None or y0 is None:
print ('INFO: No source true pos given, computing it from data...')
data_binary= np.where(source_data!=0,1,0)
#print 'INFO: data_binary sum=%d', sum(data_binary)
[y0,x0]= ndimage.measurements.center_of_mass(data_binary)
if ampl is None:
print ('INFO: No source true flux given, computing integral from data...')
ampl= np.sum(source_data,axis=None)
# Set some flags
source.SetName(source_name)
source.SetId(source_id)
source.SetType(source_type)
source.SetFlag(Caesar.Source.eFake)
source.SetSimType(source_sim_type)
if source_max_scale is not None:
source.SetSimMaxScale(source_max_scale)
source.SetTrueInfo(ampl,x0,y0)
# Set flux correction factor
fluxCorrection= self.beam_area
source.SetBeamFluxIntegral(fluxCorrection)
# Compute stats & morph pars
source.ComputeStats();
source.ComputeMorphologyParams();
return source
def make_caesar_image(self,data):
""" Make Caesar image from array data """
# Get source indexes and fill pixels in Caesar source
img_indexes= np.column_stack(np.where(data!=0))
nRows= (data.shape)[0]
nCols= (data.shape)[1]
# Set metadata
metadata= Caesar.ImgMetaData()
metadata.Nx= self.nx
metadata.Ny= self.ny
metadata.Cx= (int)(self.crpix1)
metadata.Cy= (int)(self.crpix2)
metadata.Xc= self.crval1
metadata.Yc= self.crval2
metadata.dX= -self.pixsize/3600.
metadata.dY= self.pixsize/3600.
metadata.CoordTypeX= self.ctype1
metadata.CoordTypeY= self.ctype2
metadata.BUnit= 'JY/PIXEL'
metadata.Bmaj= self.beam_bmaj/3600.
metadata.Bmin= self.beam_bmin/3600.
metadata.Bpa= self.beam_bpa
# Create Caesar image
img= Caesar.Image(nCols,nRows,"img")
img.SetMetaData(metadata)
for index in img_indexes:
rowId= index[0]
colId= index[1]
S= data[rowId,colId]
ix= colId
iy= rowId
#iy= nRows-1-rowId
gbin= ix + iy*nCols
img.FillPixel(ix,iy,S,True);
return img
def generate_compact_sources(self):
""" Generate list of compact sources in the map.
- Uniform spatial distribution
- Uniform flux distribution
Arguments:
density: source density in #sources/deg^2 (e.g. 2000)
"""
# Compute number of sources to be generated given map area in pixels
#area= (self.nx*self.ny)*self.pixsize/(3600.*3600.) # in deg^2
area= ((self.nx-2*self.marginx)*(self.ny-2*self.marginy))*self.pixsize*self.pixsize/(3600.*3600.) # in deg^2 (pixel area = pixsize^2 arcsec^2)
nsources= int(round(self.source_density*area))
S_min= (self.zmin*self.bkg_rms) + self.bkg_level
S_max= (self.zmax*self.bkg_rms) + self.bkg_level
lgS_min= np.log(S_min)
lgS_max= np.log(S_max)
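# (Added note) Fluxes are drawn uniformly in log-space between S_min and S_max,
# i.e. S = exp(U(lgS_min,lgS_max)); the corresponding significance is then
# recovered as z = (S - bkg_level)/bkg_rms.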
randomize_flux= False
if self.zmin<self.zmax:
randomize_flux= True
print('INFO: Generating %d compact sources in map...' % nsources)
# Compute blob sigma pars given beam info
sigmax= self.compute_beam_sigma(self.beam_bmaj)
sigmay= self.compute_beam_sigma(self.beam_bmin)
theta= self.beam_bpa + 90. # NB: BPA is the position angle of the major axis measured from North (up) counter-clockwise, while theta is measured w.r.t. the x axis
source_max_scale= 2*max(self.beam_bmaj,self.beam_bmin)
## Start generation loop
sources_data = Box2D(amplitude=0,x_0=0,y_0=0,x_width=2*self.nx, y_width=2*self.ny)(self.gridx, self.gridy)
mask_data = Box2D(amplitude=0,x_0=0,y_0=0,x_width=2*self.nx, y_width=2*self.ny)(self.gridx, self.gridy)
for index in range(0,nsources):
if index%100==0 :
print ("INFO: Generating compact source no. %s/%s" % (index+1,nsources))
## Generate random coordinates
#x0= np.random.uniform(0,self.nx)
#y0= np.random.uniform(0,self.ny)
#x0= np.random.uniform(0,self.nx-1)
#y0= np.random.uniform(0,self.ny-1)
x0= np.random.uniform(self.marginx,self.nx-self.marginx-1)
y0= np.random.uniform(self.marginy,self.ny-self.marginy-1)
## Compute amplitude given significance level and bkg
## Generate flux uniform in log
if randomize_flux:
lgS= np.random.uniform(lgS_min,lgS_max)
S= np.exp(lgS)
z= (S-self.bkg_level)/self.bkg_rms
else:
S= (self.zmin*self.bkg_rms) + self.bkg_level
z= self.zmin
## Generate blob
#blob_data= self.generate_blob(ampl=S,x0=x0,y0=y0,sigmax=sigmax/self.pixsize,sigmay=sigmay/self.pixsize,theta=theta,trunc_thr=self.trunc_model_zmin)
blob_data= self.generate_blob(ampl=S,x0=x0,y0=y0,sigmax=sigmax/self.pixsize,sigmay=sigmay/self.pixsize,theta=theta,trunc_thr=self.trunc_thr)
if blob_data is None:
print('Failed to generate blob (hint: too large trunc threshold), skip and regenerate...')
continue
sources_data+= blob_data
## Set model map
#ix= int(np.floor(x0))
#iy= int(np.floor(y0))
ix= int(np.round(x0))
iy= int(np.round(y0))
mask_data[iy,ix]+= S
# Make Caesar source
source_name= 'S' + str(index+1)
source_id= index+1
source_type= Caesar.Source.ePointLike
caesar_source= self.make_caesar_source(blob_data,source_name,source_id,source_type,Caesar.Source.eBlobLike,ampl=S,x0=x0,y0=y0,source_max_scale=source_max_scale)
if caesar_source is None:
print('Generated source has too few pixels, skip and regenerate...')
continue
self.caesar_sources.append(caesar_source)
print ('INFO: Source %s: Pos(%s,%s), ix=%s, iy=%s, S=%s' % (source_name,str(x0),str(y0),str(ix),str(iy),str(S)))
return [sources_data,mask_data]
def generate_extended_sources(self):
""" Generate list of extended sources in the map.
- Uniform spatial distribution
- Uniform flux distribution
Arguments:
density: source density in #sources/deg^2 (e.g. 2000)
"""
# Compute number of sources to be generated given map area in pixels
#area= (self.nx*self.ny)*self.pixsize/(3600.*3600.) # in deg^2
area= ((self.nx-2*self.marginx)*(self.ny-2*self.marginy))*self.pixsize*self.pixsize/(3600.*3600.) # in deg^2 (pixel area = pixsize^2 arcsec^2)
nsources= int(round(self.ext_source_density*area))
S_min= (self.zmin_ext*self.bkg_rms) + self.bkg_level
S_max= (self.zmax_ext*self.bkg_rms) + self.bkg_level
lgS_min= np.log(S_min)
lgS_max= np.log(S_max)
randomize_flux= False
if self.zmin_ext<self.zmax_ext:
randomize_flux= True
print('INFO: Generating %d extended sources in map...' % nsources)
print('INFO: zmin_ext=%s, zmax_ext=%s, Smin=%s, Smax=%s' % (str(self.zmin_ext),str(self.zmax_ext),str(S_min),str(S_max)) )
## Start generation loop
sources_data = Box2D(amplitude=0,x_0=0,y_0=0,x_width=2*self.nx, y_width=2*self.ny)(self.gridx, self.gridy)
ngen_sources= 0
if self.ext_source_type==-1:
nsource_types= 5
else:
nsource_types= 1
#for index in range(0,nsources):
while (ngen_sources<nsources):
if ngen_sources%10==0 :
print ("INFO: Generating extended source no. %s/%s" % (ngen_sources+1,nsources))
## Generate random coordinates
#x0= random.uniform(0,self.nx)
#y0= random.uniform(0,self.ny)
#x0= np.random.uniform(0,self.nx-1)
#y0= np.random.uniform(0,self.ny-1)
x0= np.random.uniform(self.marginx,self.nx-self.marginx-1)
y0= np.random.uniform(self.marginy,self.ny-self.marginy-1)
## Compute amplitude given significance level and bkg
## Generate flux uniform in log
if randomize_flux:
lgS= np.random.uniform(lgS_min,lgS_max)
S= np.exp(lgS)
z= (S-self.bkg_level)/self.bkg_rms
#z= random.uniform(self.zmin_ext,self.zmax_ext)
#S= (z*self.bkg_rms) + self.bkg_level
else:
S= (self.zmin_ext*self.bkg_rms) + self.bkg_level
z= self.zmin_ext
## Generate random type (1=ring, 2=ellipse, ...)
if self.ext_source_type==-1:
source_sim_type= random.randint(1, nsource_types)
else:
source_sim_type= self.ext_source_type
source_max_scale= 0.
if source_sim_type==1: # Ring2D Sector model
source_sim_type= Caesar.Source.eRingLike
ring_r= random.uniform(self.ring_rmin,self.ring_rmax)
ring_w= random.uniform(self.ring_width_min,self.ring_width_max)
#source_data= self.generate_ring(S,x0,y0,ring_r/self.pixsize,ring_w/self.pixsize) # convert radius/width from arcsec to pixels
theta1= random.uniform(-180,180)
theta2= random.uniform(-180,180)
theta_min= min(theta1,theta2)
theta_max= max(theta1,theta2)
dtheta= theta_max-theta_min
r= ring_r
R= ring_r + ring_w
sector_diagonal= np.sqrt( r*r + R*R - 2*r*R*np.cos(np.deg2rad(dtheta)) )
sector_arc= 2*R*np.pi*dtheta/360.
source_max_scale= max(max(sector_arc,ring_w),sector_diagonal)
source_data= self.generate_ring_sector(S,x0,y0,ring_r/self.pixsize,ring_w/self.pixsize,theta_min,theta_max) # convert radius/width from arcsec to pixels
elif source_sim_type==2: # Ellipse 2D model
source_sim_type= Caesar.Source.eEllipseLike
ellipse_bmaj= random.uniform(self.ellipse_rmin,self.ellipse_rmax)
#ellipse_bmin= random.uniform(self.ellipse_rmin,self.ellipse_rmax)
ellipse_bmin= random.uniform(max(self.ellipse_rratiomin*ellipse_bmaj,self.ellipse_rmin),self.ellipse_rmax)
ellipse_theta= random.uniform(0,360)
source_max_scale= max(ellipse_bmaj,ellipse_bmin)
source_data= self.generate_ellipse(S,x0,y0,ellipse_bmaj/self.pixsize,ellipse_bmin/self.pixsize,ellipse_theta) # convert radius/width from arcsec to pixels
elif source_sim_type==3: # bubble + shell model
source_sim_type= Caesar.Source.eBubbleLike
bubble_r= random.uniform(self.disk_rmin,self.disk_rmax)
shell_excess= random.uniform(self.shell_disk_ampl_ratio_min,self.shell_disk_ampl_ratio_max)
shell_S= S*(1+shell_excess)
shell_r= random.uniform(bubble_r*self.shell_disk_radius_ratio_min,bubble_r*self.shell_disk_radius_ratio_max)
shell_width= random.uniform(0,bubble_r-shell_r)
theta1= random.uniform(-180,180)
theta2= random.uniform(-180,180)
theta_min= min(theta1,theta2)
theta_max= max(theta1,theta2)
source_max_scale= bubble_r*2
source_data= self.generate_bubble(S,x0,y0,bubble_r/self.pixsize,shell_S,shell_r/self.pixsize,shell_width/self.pixsize,theta_min,theta_max) # convert radii/widths from arcsec to pixels
#elif source_sim_type==4: # Airy disk
# source_sim_type= Caesar.Source.eDiskLike
# disk_r= random.uniform(self.disk_rmin,self.disk_rmax)
# source_data= self.generate_airy_disk(S,x0,y0,disk_r)
elif source_sim_type==4: # Sersic
source_sim_type= Caesar.Source.eDiskLike
sersic_r= random.uniform(self.disk_rmin,self.disk_rmax)
sersic_theta= random.uniform(0,360)
sersic_ell= random.uniform(0,1)
source_max_scale= 2*sersic_r
##source_data= self.generate_sersic(S,x0,y0,sersic_r,sersic_ell,self.sersic_index,sersic_theta)
source_data= self.generate_sersic(S,x0,y0,sersic_r/self.pixsize,sersic_ell,self.sersic_index,sersic_theta,trunc_thr=self.trunc_thr) # convert radius from arcsec to pixels
elif source_sim_type==5: # Gaussian Blob like
source_sim_type= Caesar.Source.eBlobLike
blob_bmaj= random.uniform(self.ellipse_rmin,self.ellipse_rmax)
#blob_bmin= random.uniform(self.ellipse_rmin,self.ellipse_rmax)
blob_bmin= random.uniform(max(self.ellipse_rratiomin*blob_bmaj,self.ellipse_rmin),blob_bmaj)
blob_theta= random.uniform(0,360)
source_max_scale= 2*max(blob_bmin,blob_bmaj)
#source_data= self.generate_blob(ampl=S,x0=x0,y0=y0,sigmax=blob_bmaj/self.pixsize,sigmay=blob_bmin/self.pixsize,theta=blob_theta,trunc_thr=self.zmin_ext)
source_data= self.generate_blob(ampl=S,x0=x0,y0=y0,sigmax=blob_bmaj/self.pixsize,sigmay=blob_bmin/self.pixsize,theta=blob_theta,trunc_thr=self.trunc_thr)
if source_data is None:
print('Failed to generate blob (hint: too large trunc threshold), skip and regenerate...')
continue
else:
print('ERROR: Invalid source type given!')
continue
## Check if source data contains all zeros (e.g. truncation removed all data)
if np.count_nonzero(source_data)<=0:
print('WARN: Generated extended source data contains all zeros, regenerate...')
continue
## Check if source pixels and its contour has been already taken before
source_indexes= (source_data!=0) # get all source data pixels (others are 0)
source_indexes_xright= (np.roll(source_data,1,axis=1)!=0)
source_indexes_xleft= (np.roll(source_data,-1,axis=1)!=0)
source_indexes_yright= (np.roll(source_data,1,axis=0)!=0)
source_indexes_yleft= (np.roll(source_data,-1,axis=0)!=0)
source_mask_indexes= (source_indexes | source_indexes_xright | source_indexes_xleft | source_indexes_yright | source_indexes_yleft)
#source_mask= np.where(source_data!=0,1,0)
taken_pixels= np.where(sources_data[source_mask_indexes]!=0) # get list of taken pixels in main mask corresponding to this source
has_taken_pixels= np.any(taken_pixels)
if has_taken_pixels:
print('INFO: Source pixels have been already taken by a previously generated source, regenerate...')
continue
# Add to extended source data and mask
sources_data+= source_data
ngen_sources+= 1
# Set model map
ix= int(np.round(x0))
iy= int(np.round(y0))
# Make Caesar source
source_name= 'Sext' + str(ngen_sources)
source_id= ngen_sources
source_type= Caesar.Source.eExtended
caesar_source= self.make_caesar_source(source_data,source_name,source_id,source_type,source_sim_type,None,None,None,source_max_scale)
if caesar_source is None:
print('Generated source has too few pixels, skip and regenerate...')
continue
self.caesar_sources.append(caesar_source)
print ('INFO: Ext Source %s: Pos(%s,%s), ix=%s, iy=%s, S=%s' % (source_name,str(x0),str(y0),str(ix),str(iy),str(S)))
return sources_data
#####################################
### GENERATE MAP ##
#####################################
def generate_map(self):
""" Generate sky map """
## == INITIALIZE DATA ==
print ('INFO: Initializing simulator data...')
self.init()
## == GENERATE EMPTY IMAGE ==
data = Box2D(amplitude=0,x_0=0,y_0=0,x_width=2*self.nx, y_width=2*self.ny)(self.gridx, self.gridy)
mask_data = Box2D(amplitude=0,x_0=0,y_0=0,x_width=2*self.nx, y_width=2*self.ny)(self.gridx, self.gridy)
## == GENERATE BKG ==
if self.simulate_bkg:
print ('INFO: Generating map bkg...')
bkg_data= self.generate_bkg()
data+= bkg_data
## == GENERATE COMPACT SOURCES ==
if self.simulate_compact_sources:
print ('INFO: Generating compact sources...')
[compact_source_data,compact_source_mask_data] = self.generate_compact_sources()
data+= compact_source_data
mask_data+= compact_source_mask_data
## == GENERATE EXTENDED SOURCES ==
if self.simulate_extended_sources:
print ('INFO: Generating extended sources...')
ext_source_data = self.generate_extended_sources()
data+= ext_source_data
mask_data+= ext_source_data
## == MAKE FINAL MAP ==
print ('INFO: Creating final map with bkg + sources added...')
## Sum data in cumulative map
#data= bkg_data + compact_source_data + ext_source_data
#mask_data= compact_source_mask_data + ext_source_data
## Cast data from float64 to float32
data_casted = data.astype(np.float32)
mask_data_casted = mask_data.astype(np.float32)
## Convert data from Jy/pixel to Jy/beam
## Jy/pixel= Jy/beam / beamArea(pixels)
scaleFactor= self.beam_area
data_casted*= scaleFactor
## Create Caesar skymodel image from data (units= Jy/pixel)
print ('INFO: Creating Caesar image from data...')
##self.caesar_img= self.make_caesar_image(data_casted) # set toy sim map data
self.caesar_img= self.make_caesar_image(mask_data_casted) # set skymodel map data
## == WRITE MAPS TO FITS FILES ==
print ('INFO: Writing images to FITS...')
self.write_map(data_casted,self.mapfilename)
self.write_source_map(mask_data_casted,self.modelfilename)
## == WRITE IMG & SOURCES TO ROOT FILE ==
print ('INFO: Writing image & source collection to ROOT file...')
self.save()
## == WRITE DS9 REGION FILE ==
print ('INFO: Writing DS9 regions...')
self.write_ds9_regions()
#return [data_casted,mask_data_casted]
def write_ds9_regions(self):
""" Write DS9 regions with sim sources """
## Open file
fout = open(self.ds9filename, 'w')
## Write file header
fout.write('global color=white font=\"helvetica 8 normal\" edit=1 move=1 delete=1 include=1\n')
fout.write('image\n')
## Write source contour region
for item in self.caesar_sources:
regionInfo= item.GetDS9Region(True)
fout.write(regionInfo)
fout.write('\n')
fout.close();
def write_casa_mask(self,boxsize=10):
""" Write CASA mask file around simulated sources"""
## Create a WCS structure
w = wcs.WCS(naxis=2)
w.wcs.crpix = [self.crpix1, self.crpix2]
w.wcs.cdelt = np.array([-self.pixsize/3600., self.pixsize/3600.])
w.wcs.crval = [self.crval1, self.crval2]
w.wcs.ctype = [self.ctype1, self.ctype2]
#w.wcs.set_pv([(2, 1, 45.0)])
## Create mask ascii file with header
f = open(str(self.casafilename), 'w')
f.write("#CRTFv0\n")
#f.write("global coord = J2000, color=blue\n")
## Create a CASA box around the source
for item in self.caesar_sources:
ix_min= item.GetIxMin()
ix_max= item.GetIxMax()
iy_min= item.GetIyMin()
iy_max= item.GetIyMax()
# Set box coordinates
pixcrd = np.array([[max(0,ix_min-boxsize/2.), max(0,iy_min-boxsize/2.)], [min(self.nx-1,ix_max+boxsize/2.), min(self.ny-1,iy_max+boxsize/2.)]], np.float_)
# Convert pixel coordinates to world coordinates
world = w.wcs_pix2world(pixcrd, 1)
print(world)
f.write("box [ [{0}deg,{1}deg], [{2}deg,{3}deg] ]\n".format(min(world[0,0],world[1,0]),min(world[0,1],world[1,1]),max(world[0,0],world[1,0]),max(world[0,1],world[1,1])))
# Close ascii file
f.close()
def draw_map(self,data):
""" Draw map data """
plt.imshow(data, origin='lower', cmap="hot")
pylab.show()
def write_map(self,data,outputfile):
""" Write FITS image with sim data """
# Define FITS header
header= fits.Header()
header.set('SIMPLE','T')
header.set('BITPIX','-32')
header.set('NAXIS1', str(self.nx))
header.set('NAXIS2', str(self.ny))
#header.set('NAXIS3', 1)
#header.set('NAXIS4', 1)
header.set('BUNIT', 'JY/BEAM')
header.set('BMAJ', self.beam_bmaj/3600.)
header.set('BMIN', self.beam_bmin/3600.)
header.set('BPA', self.beam_bpa)
header.set('BSCALE',1.)
header.set('BZERO',0.)
header.set('CDELT1',-self.pixsize/3600.)
header.set('CDELT2',self.pixsize/3600.)
header.set('CTYPE1',self.ctype1)
header.set('CTYPE2',self.ctype2)
header.set('CRPIX1',self.crpix1)
header.set('CRPIX2',self.crpix2)
header.set('CRVAL1',self.crval1)
header.set('CRVAL2',self.crval2)
# Define HDU
hdu = fits.PrimaryHDU(data=data,header=header)
hdulist = fits.HDUList([hdu])
hdulist.writeto(outputfile,overwrite=True)
def write_source_map(self,data,outputfile):
""" Write FITS image with sim mask data """
# Define FITS header
header= fits.Header()
header.set('SIMPLE','T')
header.set('BITPIX','-32')
header.set('NAXIS1', str(self.nx))
header.set('NAXIS2', str(self.ny))
header.set('BUNIT', 'JY/pixel')
header.set('BMAJ', self.beam_bmaj/3600.)
header.set('BMIN', self.beam_bmin/3600.)
header.set('BPA', self.beam_bpa)
header.set('BSCALE',1.)
header.set('BZERO',0.)
header.set('CDELT1',-self.pixsize/3600.)
header.set('CDELT2',self.pixsize/3600.)
header.set('CTYPE1',self.ctype1)
header.set('CTYPE2',self.ctype2)
header.set('CRPIX1',self.crpix1)
header.set('CRPIX2',self.crpix2)
header.set('CRVAL1',self.crval1)
header.set('CRVAL2',self.crval2)
# Define HDU
hdu = fits.PrimaryHDU(data=data,header=header)
hdulist = fits.HDUList([hdu])
hdulist.writeto(outputfile,overwrite=True)
def save(self):
""" Write img & source collection to ROOT file """
# Loop over sources
print ('Filling #%s sources to ROOT tree...' % str(len(self.caesar_sources)) )
for item in self.caesar_sources:
#self.cs= item
item.Copy(self.cs)
self.cs.Print()
self.outtree.Fill()
# Write to file
self.outfile.cd()
self.caesar_img.Write()
self.outtree.Write()
self.outfile.Close()
# Write CASA mask file
self.write_casa_mask(boxsize=self.mask_boxsize)
###########################
##############
## MAIN ##
##############
def main():
"""Main function"""
#===========================
#== Get script args
#===========================
print('Get script args')
try:
args= get_args()
except Exception as ex:
print("Failed to get and parse options (err=%s)",str(ex))
return 1
# - Image args
Nx= args.nx
Ny= args.ny
marginX= args.marginx
marginY= args.marginy
pixsize= args.pixsize
ctype1= args.ctype1
ctype2= args.ctype2
crpix1= args.crpix1
crpix2= args.crpix2
crval1= args.crval1
crval2= args.crval2
#- Source model
model_trunc_zmin= args.model_trunc_zmin
trunc_thr= args.trunc_thr
npixels_min= args.npixels_min
# - Bkg info args
enable_bkg= args.enable_bkg
bkg_level= args.bkg_level
bkg_rms= args.bkg_rms
# - Compact source args
enable_compactsources= args.enable_compactsources
Bmaj= args.bmaj
Bmin= args.bmin
Bpa= args.bpa
Zmin= args.zmin
Zmax= args.zmax
source_density= args.source_density
# - Extended source args
enable_extsources= args.enable_extsources
ext_source_type= args.ext_source_type
Zmin_ext= args.zmin_ext
Zmax_ext= args.zmax_ext
ext_source_density= args.ext_source_density
ext_scale_min= args.ext_scale_min
ext_scale_max= args.ext_scale_max
ring_rmin= args.ring_rmin
ring_rmax= args.ring_rmax
ring_wmin= args.ring_wmin
ring_wmax= args.ring_wmax
ellipse_rmin= args.ellipse_rmin
ellipse_rmax= args.ellipse_rmax
disk_shell_ampl_ratio_min= args.disk_shell_ampl_ratio_min
disk_shell_ampl_ratio_max= args.disk_shell_ampl_ratio_max
disk_shell_radius_ratio_min= args.disk_shell_radius_ratio_min
disk_shell_radius_ratio_max= args.disk_shell_radius_ratio_max
# - Output args
outputfile= args.outputfile
mask_outputfile= args.outputfile_model
outputfile_sources= args.outputfile_sources
outputfile_ds9region= args.outputfile_ds9region
outputfile_casaregion= args.outputfile_casaregion
mask_boxsize= args.mask_boxsize
print("*** ARGS ***")
print("Nx: %s" % Nx)
print("Ny: %s" % Ny)
print("Margin X: %s" % marginX)
print("Margin Y: %s" % marginY)
print("pixsize: %s" % pixsize)
print("ctype: (%s %s)" % (ctype1,ctype2))
print("crpix: (%s %s)" % (crpix1,crpix2))
print("crval: (%s %s)" % (crval1,crval2))
print("Beam (Bmaj/Bmin/Bpa): (%s,%s,%s)" % (Bmaj, Bmin, Bpa))
print("Enable bkg? %s" % str(enable_bkg) )
print("Bkg info (level,rms): (%s,%s)" % (bkg_level, bkg_rms))
print("Enable compact sources? %s" % str(enable_compactsources) )
print("Source significance range: (%s,%s)" % (Zmin, Zmax))
print("Source density (deg^-2): %s" % source_density)
print("Enable extended sources? %s" % str(enable_extsources) )
print("Extended source type %s" %str(ext_source_type) )
print("Extended source significance range: (%s,%s)" % (Zmin_ext, Zmax_ext))
print("Extended source density (deg^-2): %s" % ext_source_density)
print("Extended source scale min/max: (%s,%s)" % (ext_scale_min, ext_scale_max))
print("Output filename: %s " % outputfile)
print("Model trunc thr: %s " % str(trunc_thr))
print("Mask output filename: %s " % mask_outputfile)
print("Mask box size: %s " % mask_boxsize)
print("************")
## Generate simulated sky map
print ('INFO: Generate simulated sky map...')
simulator= SkyMapSimulator(Nx,Ny,pixsize)
simulator.set_margins(marginX,marginY)
simulator.set_ref_pix(crpix1,crpix2)
simulator.set_ref_pix_coords(crval1,crval2)
simulator.set_coord_system_type(ctype1,ctype2)
simulator.set_model_trunc_thr(trunc_thr)
simulator.set_model_trunc_significance(model_trunc_zmin)
simulator.set_npixels_min(npixels_min)
simulator.set_map_filename(outputfile)
simulator.set_model_filename(mask_outputfile)
simulator.set_source_filename(outputfile_sources)
simulator.set_ds9region_filename(outputfile_ds9region)
simulator.set_casaregion_filename(outputfile_casaregion)
simulator.enable_bkg(enable_bkg)
simulator.set_bkg_pars(bkg_level,bkg_rms)
simulator.set_beam_info(Bmaj,Bmin,Bpa)
simulator.enable_compact_sources(enable_compactsources)
simulator.set_source_significance_range(Zmin,Zmax)
simulator.set_source_density(source_density)
simulator.enable_extended_sources(enable_extsources)
simulator.set_ext_source_type(ext_source_type)
simulator.set_ext_source_significance_range(Zmin_ext,Zmax_ext)
simulator.set_ext_source_density(ext_source_density)
#simulator.set_ring_pars(ring_rmin,ring_rmax,ring_wmin,ring_wmax)
simulator.set_ring_pars(ext_scale_min,ext_scale_max,ring_wmin,ring_wmax)
#simulator.set_ellipse_pars(ellipse_rmin,ellipse_rmax)
simulator.set_ellipse_pars(ext_scale_min,ext_scale_max)
simulator.set_disk_pars(ext_scale_min,ext_scale_max)
simulator.set_disk_shell_pars(disk_shell_ampl_ratio_min,disk_shell_ampl_ratio_max,disk_shell_radius_ratio_min,disk_shell_radius_ratio_max)
simulator.set_mask_box_size(mask_boxsize)
#[data, mask_data]= simulator.generate_map()
simulator.generate_map()
## Write fits
#print ('INFO: Writing images to FITS...')
#simulator.write_map(data,outputfile)
#simulator.write_source_map(mask_data,mask_outputfile)
## Write sources to ROOT
#print ('INFO: Writing source collection to ROOT TTree...')
#simulator.write_source_tree(outputfile_sources)
## Draw image
#print ('INFO: Draw image...')
#plt.imshow(data, origin='lower', cmap="hot")
#pylab.show()
###################
## MAIN EXEC ##
###################
if __name__ == "__main__":
#main()
sys.exit(main())
| gpl-3.0 |
gef756/statsmodels | statsmodels/examples/ex_outliers_influence.py | 34 | 3906 |
from __future__ import print_function
import numpy as np
import statsmodels.stats.outliers_influence as oi
if __name__ == '__main__':
import statsmodels.api as sm
data = np.array('''\
64 57 8
71 59 10
53 49 6
67 62 11
55 51 8
58 50 7
77 55 10
57 48 9
56 42 10
51 42 6
76 61 12
68 57 9'''.split(), float).reshape(-1,3)
varnames = 'weight height age'.split()
endog = data[:,0]
exog = sm.add_constant(data[:,2])
res_ols = sm.OLS(endog, exog).fit()
hh = (res_ols.model.exog * res_ols.model.pinv_wexog.T).sum(1)
x = res_ols.model.exog
hh_check = np.diag(np.dot(x, np.dot(res_ols.model.normalized_cov_params, x.T)))
from numpy.testing import assert_almost_equal
assert_almost_equal(hh, hh_check, decimal=13)
res = res_ols #alias
#http://en.wikipedia.org/wiki/PRESS_statistic
#predicted residuals, leave one out predicted residuals
resid_press = res.resid / (1-hh)
ess_press = np.dot(resid_press, resid_press)
sigma2_est = res.mse_resid #residual variance estimate; can be replaced by different estimators of sigma^2
sigma_est = np.sqrt(sigma2_est)
resid_studentized = res.resid / sigma_est / np.sqrt(1 - hh)
#http://en.wikipedia.org/wiki/DFFITS:
dffits = resid_studentized * np.sqrt(hh / (1 - hh))
nobs, k_vars = res.model.exog.shape
#Belsley, Kuh and Welsch (1980) suggest a threshold for abs(DFFITS)
dffits_threshold = 2 * np.sqrt(k_vars / float(nobs))
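#(added) e.g. with nobs=12 and k_vars=2 here: 2*np.sqrt(2/12.) ~ 0.816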
res_ols.df_modelwc = res_ols.df_model + 1
n_params = res.model.exog.shape[1]
#http://en.wikipedia.org/wiki/Cook%27s_distance
cooks_d = res.resid**2 / sigma2_est / res_ols.df_modelwc * hh / (1 - hh)**2
#or
#Eubank p.93, 94
cooks_d2 = resid_studentized**2 / res_ols.df_modelwc * hh / (1 - hh)
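#(added) sanity check: the two formulations above are algebraically identical
assert_almost_equal(cooks_d, cooks_d2, decimal=12)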
#threshold if normal, also Wikipedia
from scipy import stats
alpha = 0.1
#df looks wrong
print(stats.f.isf(1-alpha, n_params, res.df_resid))
print(stats.f.sf(cooks_d, n_params, res.df_resid))
print('Cooks Distance')
print(cooks_d)
print(cooks_d2)
doplot = 0
if doplot:
import matplotlib.pyplot as plt
fig = plt.figure()
# ax = fig.add_subplot(3,1,1)
# plt.plot(andrew_results.weights, 'o', label='rlm weights')
# plt.legend(loc='lower left')
ax = fig.add_subplot(3,1,2)
plt.plot(cooks_d, 'o', label="Cook's distance")
plt.legend(loc='upper left')
ax2 = fig.add_subplot(3,1,3)
plt.plot(resid_studentized, 'o', label='studentized_resid')
plt.plot(dffits, 'o', label='DFFITS')
leg = plt.legend(loc='lower left', fancybox=True)
leg.get_frame().set_alpha(0.5) #, fontsize='small')
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize='small') # the legend text fontsize
print(oi.reset_ramsey(res, degree=3))
#note, constant in last column
for i in range(1):
print(oi.variance_inflation_factor(res.model.exog, i))
infl = oi.OLSInfluence(res_ols)
print(infl.resid_studentized_external)
print(infl.resid_studentized_internal)
print(infl.summary_table())
print(oi.summary_table(res, alpha=0.05)[0])
'''
>>> res.resid
array([ 4.28571429, 4. , 0.57142857, -3.64285714,
-4.71428571, 1.92857143, 10. , -6.35714286,
-11. , -1.42857143, 1.71428571, 4.64285714])
>>> infl.hat_matrix_diag
array([ 0.10084034, 0.11764706, 0.28571429, 0.20168067, 0.10084034,
0.16806723, 0.11764706, 0.08403361, 0.11764706, 0.28571429,
0.33613445, 0.08403361])
>>> infl.resid_press
array([ 4.76635514, 4.53333333, 0.8 , -4.56315789,
-5.24299065, 2.31818182, 11.33333333, -6.94036697,
-12.46666667, -2. , 2.58227848, 5.06880734])
>>> infl.ess_press
465.98646628086374
'''
| bsd-3-clause |
ckinzthompson/biasd | biasd/gui/preferences.py | 1 | 6654 | # -*- coding: utf-8 -*-®
'''
GUI written in QT5 to setup preferences
'''
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QComboBox, QLabel, QLineEdit, QMessageBox, QMainWindow, QRadioButton, QGroupBox, QGridLayout, QSpinBox, QFileDialog, QFrame
from PyQt5.QtGui import QStandardItemModel, QStandardItem, QDoubleValidator
from PyQt5.QtCore import Qt, QThread
# Make sure that we are using QT5
import matplotlib
matplotlib.use('Qt5Agg')
import sys
import numpy as np
import biasd as b
class prefs(QWidget):
def __init__(self,parent):
super(QWidget,self).__init__(parent=parent)
self.initialize()
def initialize(self):
layout = QVBoxLayout()
# Change Likelihood
vbox = QVBoxLayout()
likelihoods = ["Python","C","CUDA"]
self.rbs = [QRadioButton(rtext) for rtext in likelihoods]
self.change_ll(True)
[vbox.addWidget(r) for r in self.rbs]
frame1 = QGroupBox("Likelihood Function")
frame1.setLayout(vbox)
layout.addWidget(frame1)
# Speed Test
grid1 = QGridLayout()
self.spin = [QSpinBox(), QSpinBox()]
[s.setRange(1,1000000000) for s in self.spin]
self.btest = QPushButton("Test")
lrepeats = QLabel("Repeats")
ldatapoints = QLabel("Datapoints")
self.lavg = QLabel("")
grid1.addWidget(lrepeats,0,0)
grid1.addWidget(ldatapoints,0,1)
grid1.addWidget(self.lavg,0,2)
grid1.addWidget(self.spin[0],1,0)
grid1.addWidget(self.spin[1],1,1)
grid1.addWidget(self.btest,1,2)
frame2 = QGroupBox("Speed Test Likelihood Function")
frame2.setLayout(grid1)
layout.addWidget(frame2)
# Options
frame_options = QGroupBox('Options')
grid2 = QGridLayout()
leps = QLabel(u"Numerical Integration Error, ε")
self.le_eps = QLineEdit()
self.le_eps.setValidator(QDoubleValidator(1e-300,1e300,100))
lthreads = QLabel('Number of MCMC Threads')
self.spin_threads = QSpinBox()
self.spin_threads.setRange(1,1000000)
grid2.addWidget(leps,0,0)
grid2.addWidget(self.le_eps,0,1)
grid2.addWidget(lthreads,1,0)
grid2.addWidget(self.spin_threads,1,1)
frame_options.setLayout(grid2)
layout.addWidget(frame_options)
# Reset and Log
frame3 = QFrame()
hbox = QHBoxLayout()
breset = QPushButton("Reset")
bdumplog = QPushButton("Save Log")
hbox.addWidget(bdumplog)
hbox.addWidget(breset)
frame3.setLayout(hbox)
layout.addWidget(frame3)
layout.addStretch(1)
self.setLayout(layout)
#Fill Forms
self.init_forms()
# Connect Forms & Buttons
[r.toggled.connect(self.change_ll) for r in self.rbs]
[s.valueChanged.connect(self.update_speed) for s in self.spin]
self.btest.clicked.connect(self.test_likelihood)
#self.le_eps.returnPressed.connect(self.update_eps)
self.le_eps.editingFinished.connect(self.update_eps)
self.spin_threads.valueChanged.connect(self.update_threads)
breset.clicked.connect(self.check_reset)
bdumplog.clicked.connect(self.save_log)
self.setWindowTitle('Set Preferences')
# self.setGeometry(200,200,500,300)
self.show()
def update_speed(self):
p = self.parent().parent().prefs
p.speed_n = self.spin[0].value()
p.speed_d = self.spin[1].value()
def update_threads(self):
p = self.parent().parent().prefs
p.n_threads = self.spin_threads.value()
self.parent().parent().log.new('Updated N threads = '+str(p.n_threads))
def update_eps(self):
p = self.parent().parent().prefs
p.eps = np.array(float(self.le_eps.text()),dtype='float64')
b.likelihood._eps = p.eps
self.parent().parent().log.new('Updated epsilon = '+str(p.eps))
def check_reset(self):
really = QMessageBox.question(self,"Reset?","Do you really want to reset the preferences?")
if really == QMessageBox.Yes:
self.reset()
def init_forms(self):
p = self.parent().parent().prefs
self.spin[0].setValue(p.speed_n)
self.spin[1].setValue(p.speed_d)
self.lavg.setText("")
self.le_eps.setText(str(p.eps))
self.spin_threads.setValue(p.n_threads)
def reset(self):
p = self.parent().parent().prefs
p.reset()
self.init_forms()
self.parent().parent().parent().statusBar().showMessage("Reset Preferences")
self.parent().parent().log.new("Reset Preferences")
def save_log(self):
print(self.parent().parent().log.format())
oname = QFileDialog.getSaveFileName(self,"Save Log file",'./','*.txt')
try:
if not oname[0]:
return
f = open(oname[0],'w')
f.write(self.parent().parent().log.format())
f.close()
except:
QMessageBox.critical(None,"Could Not Save","Could not save file: %s\n."%(oname[0]))
def speed_tester(self):
try:
sb = self.parent().parent().parent().statusBar()
p = self.parent().parent().prefs
sb.showMessage('Testing Speed...')
time = b.likelihood.test_speed(p.speed_n,p.speed_d)
sout = str(time)+u' μsec/datapoint'
self.lavg.setText(sout)
self.parent().parent().log.new('Speed Test - '
+ b.likelihood.ll_version
+ '\n%d, %d, %s'%(p.speed_n, p.speed_d,sout))
sb.showMessage('Test Complete')
except:
pass
def test_likelihood(self):
## Be careful so that you don't lock up people's computers for too long
if b.likelihood.ll_version == 'Python':
ev = 1000.
elif b.likelihood.ll_version == 'C':
ev = 100.
elif b.likelihood.ll_version == 'CUDA':
ev = 10.
p = self.parent().parent().prefs
et = ev/1e6 * p.speed_d * p.speed_n
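# (Added note) e.g. with the C likelihood (~100 us/datapoint), n repeats of d
# datapoints give et = 100e-6*n*d seconds; the 10 s guard below triggers the
# confirmation dialog for long tests.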
proceed = True
if et > 10.:
really = QMessageBox.question(self,"Long Time",
"This might take a long time (~ %.0f sec). "%(et)+
"Are you sure you want to perform this test?")
if not really == QMessageBox.Yes:
proceed = False
if proceed:
self.speed_tester()
def change_ll(self,enable):
failure = 'Python'
try:
if self.rbs[0].isChecked():
b.likelihood.use_python_ll()
elif self.rbs[1].isChecked():
failure = 'C'
b.likelihood.use_c_ll()
elif self.rbs[2].isChecked():
failure = 'CUDA'
b.likelihood.use_cuda_ll()
except:
QMessageBox.critical(self,"Can't Find %s Library"%(failure),
"Can't find or load the %s library."%(failure) +
"Check that it is compiled.")
for i,t in zip(range(3),['Python','C','CUDA']):
if b.likelihood.ll_version == t:
self.rbs[i].setChecked(True)
def keyPressEvent(self,event):
if event.key() == Qt.Key_Escape:
self.parent().close()
class ui_preferences(QMainWindow):
def __init__(self,parent=None):
super(QMainWindow,self).__init__(parent)
self.ui = prefs(self)
self.setCentralWidget(self.ui)
self.setGeometry(100,100,400,300)
self.show()
def closeEvent(self,event):
self.parent().activateWindow()
self.parent().raise_()
self.parent().setFocus()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
w = ui_preferences()
sys.exit(app.exec_())
| mit |
navijo/FlOYBD | DataMining/weather/ml/linearRegression.py | 2 | 6641 | from pyspark import SparkContext, SparkConf
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.regression import LinearRegression
from pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import SQLContext, SparkSession
from pyspark.sql import Row
from pyspark.sql.functions import avg
from pyspark.sql.types import *
from pyspark.sql.utils import IllegalArgumentException
from pyspark.sql.functions import UserDefinedFunction
from cassandra.cluster import Cluster
import pyspark
import pandas as pd
import datetime
import time
import numpy as np
import math
import os, uuid
import pickle
import shutil
import py4j
from CustomModel import CustomModel
def clearColumn(dataframe, columnName):
udf = UserDefinedFunction(lambda x: float(0), FloatType())
new_df = dataframe.select(
*[udf(column).alias(columnName) if column == columnName else column for column in dataframe.columns])
return new_df
def assignValue(dataframe, columnName, value):
udf = UserDefinedFunction(lambda x: value, FloatType())
new_df = dataframe.select(
*[udf(column).alias(columnName) if column == columnName else column for column in dataframe.columns])
return new_df
def getStationsIds(stations):
return stations.select("station_id", "name").collect()
def insertDataIntoDatabase(dataToInsert, columnName, station_id):
realValStr = dataToInsert.select("avg(" + columnName + ")").collect()[0][0]
realVal = float(realValStr)
predictedValStr = dataToInsert.select("avg(prediction)").collect()[0][0]
predictedVal = float(predictedValStr)
print("Real Val:" + str(realVal) + "\tPredicted Val: " + str(predictedVal) + "\t Prediction Diff: " + str(
realVal - predictedVal))
session.execute("INSERT INTO Station_Regression_Prediction (station_id," + columnName + "," + columnName + "_pred"")\
VALUES (%s, %s, %s)", [str(station_id), realVal, predictedVal])
def parseFunction(row):
print(row)
def saveModelToDatabase(model, station_id, columnName):
name = str(station_id + "__" + columnName)
print("Saving the model..." + str(name))
# fid = uuid.uuid4()
# res = bytearray([model])
res = pickle.dumps(CustomModel(model), fix_imports=False)
session.execute("INSERT INTO linear_model (name,model) VALUES (%s, %s)", (name, res))
def saveModel(model, station_id, columnName):
directory = "/home/ubuntu/GSOC17/FlOYBD/Flask/models"
if not os.path.exists(directory):
os.makedirs(directory)
path = directory + "/" + station_id + "__" + columnName
print("Saving the model in..." + str(path))
model.save(str(path))
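# Hedged sketch, not part of the original pipeline: a model persisted by saveModel() can
# later be reloaded with pyspark's LinearRegressionModel loader, e.g. to serve predictions.
# The helper name loadModel and the hard-coded base directory mirror saveModel() above.
def loadModel(station_id, columnName):
    from pyspark.ml.regression import LinearRegressionModel
    path = "/home/ubuntu/GSOC17/FlOYBD/Flask/models/" + station_id + "__" + columnName
    return LinearRegressionModel.load(str(path))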
def predictDataForStation(stationData, columnName, station_id):
columnsList = ["max_temp", "med_temp", "min_temp", "max_pressure", "min_pressure", "precip", "insolation"]
# assembler = VectorAssembler(inputCols=columnsList,outputCol="features")
assembler = VectorAssembler(inputCols=[columnName], outputCol="features")
assembledData = assembler.transform(stationData)
feature_data = assembledData.withColumn("label", stationData[columnName]).withColumn("features",
assembledData.features)
print("Getting training data...")
test_data = feature_data.sample(False, 0.1)
train_data = feature_data.sample(False, 0.9)
print("Test data: " + str(test_data.count()) + " , Train data: " + str(train_data.count()))
# BestModel
lr = LinearRegression()
paramGrid = ParamGridBuilder() \
.addGrid(lr.regParam, [0.1, 0.01, 0.001, 0.0001, 0.0001]) \
.addGrid(lr.fitIntercept, [False, True]) \
.addGrid(lr.maxIter, [1, 10, 50, 100]) \
.build()
try:
print("Calculating and training the best model")
tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=paramGrid, evaluator=RegressionEvaluator(),
trainRatio=0.8)
# Fit the model
lrModel = tvs.fit(train_data)
saveModel(lrModel.bestModel, station_id, columnName)
        ##### THESE LINES ARE THE GOOD ONES!!!! ######
# predictions = lrModel.transform(test_data).select("measure_date","station_id",columnName,"prediction")
# groupedPredictions = predictions.groupBy("station_id").agg(avg(columnName),avg("prediction"))
# insertDataIntoDatabase(groupedPredictions,columnName,station_id)
except IllegalArgumentException as error:
print("#####IllegalArgumentException on :\t " + str(station_id) + " on " + str(columnName) + "#####")
print("IllegalArgumentException : {0}".format(error))
except py4j.protocol.Py4JJavaError as error:
print("#####Py4JJavaError on :\t " + str(station_id) + " on " + str(columnName) + "#####")
print("Py4JJavaError : {0}".format(error))
if __name__ == "__main__":
start_time = time.time()
global stations, dailyData, sc
conf = SparkConf()
# conf.setMaster("spark://192.168.246.236:7077")
conf.setMaster("local[*]")
conf.setAppName("Linear Regression Spark2")
conf.set("spark.cassandra.connection.host", "192.168.246.236")
conf.set("spark.executor.memory", "10g")
conf.set("spark.num.executors", "2")
cluster = Cluster(['192.168.246.236'])
session = cluster.connect("dev")
sc = SparkContext(conf=conf)
# sc.setLogLevel("INFO")
sql = SQLContext(sc)
spark = SparkSession(sc)
print("SparkContext => ", sc)
print("SQLContext => ", sql)
shutil.rmtree('/home/ubuntu/GSOC17/FlOYBD/Flask/models')
stations = sql.read.format("org.apache.spark.sql.cassandra").load(keyspace="dev", table="station")
clean_data = sql.read.format("org.apache.spark.sql.cassandra").load(keyspace="dev", table="clean_daily_measurement")
stationsIds = getStationsIds(stations)
stationCount = 1
columnsToPredict = ["max_temp", "med_temp", "min_temp", "max_pressure", "min_pressure", "precip", "insolation"]
for station in stationsIds:
print(
"##############\tProcessing station #" + str(stationCount) + " :\t " + str(station.station_id) + "-" + str(
station.name.encode('utf-8')) + "\t##############")
data = clean_data[clean_data.station_id == station.station_id]
for column in columnsToPredict:
print("Processing column " + column)
predictDataForStation(data, column, station.station_id)
stationCount += 1
print("--- %s seconds ---" % (time.time() - start_time))
print("END!!!")
sc.stop()
| mit |
kjs73/pele | pele/gui/ui/dgraph_dlg.py | 5 | 24356 | import sys
from PyQt4 import QtGui
from PyQt4.QtGui import QApplication, QWidget, QColorDialog, QInputDialog
from PyQt4.QtCore import pyqtSlot
import networkx as nx
import dgraph_browser
from pele.utils.disconnectivity_graph import DisconnectivityGraph, database2graph, TreeLeastCommonAncestor
from pele.storage import Database, TransitionState
from pele.utils.events import Signal
from pele.rates import RatesLinalg, compute_committors
def check_thermodynamic_info(transition_states):
"""return False if any transition state or minimum does not have pgorder or fvib"""
def myiter(tslist):
for ts in tslist:
yield ts
yield ts.minimum1
yield ts.minimum2
for mts in myiter(transition_states):
if not mts.invalid:
if mts.fvib is None or mts.pgorder is None:
return False
return True
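def transition_states_from_graph(graph):
    """Hedged convenience sketch, not part of the original module: collect the
    TransitionState objects stored on the graph edges (under the "ts" key) in the
    form expected by check_thermodynamic_info()."""
    return [data["ts"] for u, v, data in graph.edges_iter(data=True)]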
def minimum_energy_path_old(graph, m1, m2):
"""find the minimum energy path between m1 and m2 and color the dgraph appropriately"""
# add weight attribute to the graph
# note: this is not actually the minimum energy path.
# This minimizes the sum of energies along the path
# TODO: use minimum spanning tree to find the minimum energy path
emin = min(( m.energy for m in graph.nodes_iter() ))
for u, v, data in graph.edges_iter(data=True):
data["weight"] = data["ts"].energy - emin
path = nx.shortest_path(graph, m1, m2, weight="weight")
return path
def minimum_energy_path(graph, m1, m2):
for u, v, data in graph.edges_iter(data=True):
data["energy"] = data["ts"].energy
mst = nx.minimum_spanning_tree(graph, weight="energy")
path = nx.shortest_path(mst, m1, m2)
return path
# transition_states = [data["ts"] for u, v, data in graph.edges_iter(data=True)]
# transition_states.sort(key=lambda ts: ts.energy) # small energies to the left
#
# subtrees = nx.utils.UnionFind()
# for ts in transition_states:
# u, v = ts.minimum1, ts.minimum2
# if subtrees[u] != subtrees[v]:
# subtrees.union(u,v)
# if subtrees[m1] == subtrees[m2]:
# break
# if subtrees
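def minimum_energy_path_unionfind(graph, m1, m2):
    """Hedged sketch of the union-find variant outlined in the comments above; it is
    not part of the original module. Transition states are added in order of
    increasing energy until m1 and m2 become connected, then a path is read off the
    partially built graph."""
    transition_states = [data["ts"] for u, v, data in graph.edges_iter(data=True)]
    transition_states.sort(key=lambda ts: ts.energy)  # small energies first
    subtrees = nx.utils.UnionFind()
    subgraph = nx.Graph()
    for ts in transition_states:
        u, v = ts.minimum1, ts.minimum2
        subgraph.add_edge(u, v, ts=ts)
        if subtrees[u] != subtrees[v]:
            subtrees.union(u, v)
        if subtrees[m1] == subtrees[m2]:
            break
    return nx.shortest_path(subgraph, m1, m2)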
class LabelMinimumAction(QtGui.QAction):
"""This action will create a dialog box to label a minimum"""
def __init__(self, minimum, parent=None):
QtGui.QAction.__init__(self, "add label", parent)
self.parent = parent
self.minimum = minimum
self.triggered.connect(self.__call__)
def __call__(self, val):
dialog = QInputDialog(parent=self.parent)
# dialog.setLabelText("")
dialog.setLabelText("set label for minimum: " + str(self.minimum.energy))
dialog.setInputMode(0)
dialog.exec_()
if dialog.result():
label = dialog.textValue()
self.parent._minima_labels[self.minimum] = label
class ColorPathAction(QtGui.QAction):
"""this action will color the minimum energy path to minimum1"""
def __init__(self, minimum1, minimum2, parent=None):
QtGui.QAction.__init__(self, "show path to %d" % minimum2._id, parent)
self.parent = parent
self.minimum1 = minimum1
self.minimum2 = minimum2
self.triggered.connect(self.__call__)
def __call__(self, val):
self.parent._color_minimum_energy_path(self.minimum1, self.minimum2)
class ColorMFPTAction(QtGui.QAction):
"""this action will color the minima by mean first passage times to minimum1"""
def __init__(self, minimum1, parent=None):
QtGui.QAction.__init__(self, "color by mfpt", parent)
self.parent = parent
self.minimum1 = minimum1
self.triggered.connect(self.__call__)
def __call__(self, val):
dialog = QInputDialog(parent=self.parent)
# dialog.setLabelText("")
dialog.setLabelText("Temperature for MFPT calculation")
dialog.setInputMode(2)
dialog.setDoubleValue(1.)
dialog.exec_()
if dialog.result():
T = dialog.doubleValue()
self.parent._color_by_mfpt(self.minimum1, T=T)
class ColorCommittorAction(QtGui.QAction):
"""this action will color the graph by committor probabilities"""
def __init__(self, minimum1, minimum2, parent=None):
QtGui.QAction.__init__(self, "color by committor %d" % minimum2._id, parent)
self.parent = parent
self.minimum1 = minimum1
self.minimum2 = minimum2
self.triggered.connect(self.__call__)
def __call__(self, val):
dialog = QInputDialog(parent=self.parent)
# dialog.setLabelText("")
dialog.setLabelText("Temperature for committor calculation")
dialog.setInputMode(2)
dialog.setDoubleValue(1.)
dialog.exec_()
if dialog.result():
T = dialog.doubleValue()
self.parent._color_by_committor(self.minimum1, self.minimum2, T=T)
class LayoutByCommittorAction(QtGui.QAction):
"""this action will color the graph by committor probabilities"""
def __init__(self, minimum1, minimum2, parent=None):
QtGui.QAction.__init__(self, "layout by committor %d" % minimum2._id, parent)
self.parent = parent
self.minimum1 = minimum1
self.minimum2 = minimum2
self.triggered.connect(self.__call__)
def __call__(self, val):
dialog = QInputDialog(parent=self.parent)
# dialog.setLabelText("")
dialog.setLabelText("Temperature for committor calculation")
dialog.setInputMode(2)
dialog.setDoubleValue(1.)
dialog.exec_()
if dialog.result():
T = dialog.doubleValue()
self.parent._layout_by_committor(self.minimum1, self.minimum2, T=T)
class DGraphWidget(QWidget):
"""
dialog for showing and modifying the disconnectivity graph
Parameters
----------
database : Database object
graph : networkx Graph, optional
you can bypass the database and pass a graph directly. if you pass the graph,
pass None as the database
params : dict
initialize the values for the disconnectivity graph
"""
def __init__(self, database, graph=None, params=None, parent=None):
if params is None: params = dict()
super(DGraphWidget, self).__init__(parent=parent)
self.database = database
self.graph = graph
self.ui = dgraph_browser.Ui_Form()
self.ui.setupUi(self)
self.canvas = self.ui.widget.canvas
# self.ui.wgt_mpl_toolbar = NavigationToolbar()
# self.toolbar = self.
self.input_params = params.copy()
self.params = {}
self.set_defaults()
self.minimum_selected = Signal()
# self.minimum_selected(minim)
self._selected_minimum = None
# self.rebuild_disconnectivity_graph()
self.colour_tree = []
self.tree_selected = None
self._tree_cid = None
self._minima_cid = None
self._minima_labels = dict()
# # populate the dropdown list with the color names
# self._colors = sorted(col.cnames.keys())
# self.ui.comboBox_colour.addItems(self._colors)
# [self.ui.comboBox_colour.addItem(s) for s in self._colors]
# self.ui.comboBox_colour.activated[str].connect(self._color_tree)
def _set_checked(self, keyword, default):
"""utility to set the default values for check boxes
objects must have the name chkbx_keyword
"""
if keyword in self.input_params:
v = self.input_params[keyword]
else:
v = default
line = "self.ui.chkbx_%s.setChecked(bool(%d))" % (keyword, v)
exec line
def _set_lineEdit(self, keyword, default=None):
"""utility to set the default values for lineEdit objects
objects must have the name lineEdit_keyword
"""
if keyword in self.input_params:
v = self.input_params[keyword]
else:
v = default
if v is not None:
line = "self.ui.lineEdit_%s.setText(str(%s))" % (keyword, str(v))
exec line
def set_defaults(self):
self._set_checked("center_gmin", True)
self._set_checked("show_minima", True)
self._set_checked("order_by_energy", False)
self._set_checked("order_by_basin_size", True)
self._set_checked("include_gmin", True)
self._set_checked("show_trees", False)
# self.ui.chkbx_show_minima.setChecked(True)
# self.ui.chkbx_order_by_energy.setChecked(False)
# self.ui.chkbx_order_by_basin_size.setChecked(True)
# self.ui.chkbx_include_gmin.setChecked(True)
self._set_lineEdit("Emax")
self._set_lineEdit("subgraph_size")
self._set_lineEdit("nlevels")
# self.line_width = 0.5
self._set_lineEdit("linewidth", default=0.5)
def _get_input_parameters(self):
self.params = self.input_params.copy()
if "show_minima" in self.params:
self.params.pop("show_minima")
params = self.params
Emax = self.ui.lineEdit_Emax.text()
if len(Emax) > 0:
self.params["Emax"] = float(Emax)
subgraph_size = self.ui.lineEdit_subgraph_size.text()
if len(subgraph_size) > 0:
self.params["subgraph_size"] = int(subgraph_size)
nlevels = self.ui.lineEdit_nlevels.text()
if len(nlevels) > 0:
self.params["nlevels"] = int(nlevels)
offset = self.ui.lineEdit_offset.text()
if len(offset) > 0:
params["node_offset"] = float(offset)
line_width = self.ui.lineEdit_linewidth.text()
if len(line_width) > 0:
self.line_width = float(line_width)
self.title = self.ui.lineEdit_title.text()
params["center_gmin"] = self.ui.chkbx_center_gmin.isChecked()
self.show_minima = self.ui.chkbx_show_minima.isChecked()
params["order_by_energy"] = self.ui.chkbx_order_by_energy.isChecked()
params["order_by_basin_size"] = self.ui.chkbx_order_by_basin_size.isChecked()
params["include_gmin"] = self.ui.chkbx_include_gmin.isChecked()
self.show_trees = self.ui.chkbx_show_trees.isChecked()
# @pyqtSlot(str)
# def _color_tree(self, colour):
# if self.tree_selected is not None:
# c = col.hex2color(col.cnames[str(colour)])
# print "coloring tree", colour, self.tree_selected
#
# for tree in self.tree_selected.get_all_trees():
# tree.data["colour"] = c
#
# self.redraw_disconnectivity_graph()
## self.tree_selected = None
@pyqtSlot()
def on_btnRedraw_clicked(self):
self.redraw_disconnectivity_graph()
@pyqtSlot()
def on_btnRebuild_clicked(self):
self.rebuild_disconnectivity_graph()
def redraw_disconnectivity_graph(self):
self.params = self._get_input_parameters()
self._draw_disconnectivity_graph(self.show_minima, self.show_trees)
def rebuild_disconnectivity_graph(self):
self._get_input_parameters()
self._minima_labels = dict()
self._build_disconnectivity_graph(**self.params)
self._draw_disconnectivity_graph(self.show_minima, self.show_trees)
def _build_disconnectivity_graph(self, **params):
if self.database is not None:
db = self.database
apply_Emax = "Emax" in params and "T" not in params
if apply_Emax:
self.graph = database2graph(db, Emax=params['Emax'])
else:
self.graph = database2graph(db)
dg = DisconnectivityGraph(self.graph, **params)
dg.calculate()
self.dg = dg
def _get_tree_layout(self, tree):
treelist = []
xlist = []
energies = []
for tree in tree.get_all_trees():
xlist.append(tree.data["x"])
treelist.append(tree)
if tree.is_leaf():
energies.append(tree.data["minimum"].energy)
else:
energies.append(tree.data["ethresh"])
return treelist, xlist, energies
def _on_pick_tree(self, event):
"""a matplotlib callback function for when a tree is clicked on"""
if event.artist != self._treepoints:
# print "you clicked on something other than a node"
return True
ind = event.ind[0]
self.tree_selected = self._tree_list[ind]
print "tree clicked on", self.tree_selected
# launch a color selector dialog and color
# all subtrees by the selected color
color_dialog = QColorDialog(parent=self)
color_dialog.exec_()
if color_dialog.result():
color = color_dialog.selectedColor()
rgba = color.getRgbF() # red green blue alpha
print "color", rgba
rgb = rgba[:3]
for tree in self.tree_selected.get_all_trees():
tree.data["colour"] = rgb
self.redraw_disconnectivity_graph()
def _color_minimum_energy_path(self, m1, m2):
"""find the minimum energy path between m1 and m2 and color the dgraph appropriately"""
        # the path is found with the module-level minimum_energy_path() helper,
        # which uses a minimum spanning tree weighted by transition state energies
path = minimum_energy_path(self.graph, m1, m2)
# emin = min(( m.energy for m in self.graph.nodes_iter() ))
# for u, v, data in self.graph.edges_iter(data=True):
# data["weight"] = data["ts"].energy - emin
# path = nx.shortest_path(self.graph, m1, m2, weight="weight")
print "there are", len(path), "minima in the path from", m1._id, "to", m2._id
# color all trees up to the least common ancestor in the dgraph
trees = [self.dg.minimum_to_leave[m] for m in path]
ancestry = TreeLeastCommonAncestor(trees)
all_trees = ancestry.get_all_paths_to_common_ancestor()
# remove the least common ancestor so the coloring doesn't go to higher energies
all_trees.remove(ancestry.least_common_ancestor)
# color the trees
for tree in all_trees:
tree.data["colour"] = (1., 0., 0.)
self.redraw_disconnectivity_graph()
def _color_by_mfpt(self, min1, T=1.):
print "coloring by the mean first passage time to get to minimum", min1._id
# get a list of transition states in the same cluster as min1
edges = nx.bfs_edges(self.graph, min1)
transition_states = [ self.graph.get_edge_data(u, v)["ts"] for u, v in edges ]
if not check_thermodynamic_info(transition_states):
raise Exception("The thermodynamic information is not yet computed")
# get an arbitrary second minimum2
for ts in transition_states:
if ts.minimum2 != min1:
min2 = ts.minimum2
break
A = [min1]
B = [min2]
rcalc = RatesLinalg(transition_states, A, B, T=T)
rcalc.compute_rates()
mfptimes = rcalc.get_mfptimes()
tmax = max(mfptimes.itervalues())
def get_mfpt(m):
try:
return mfptimes[m]
except KeyError:
return tmax
self.dg.color_by_value(get_mfpt)
self.redraw_disconnectivity_graph()
def _color_by_committor(self, min1, min2, T=1.):
print "coloring by the probability that a trajectory gets to minimum", min1._id, "before", min2._id
# get a list of transition states in the same cluster as min1
edges = nx.bfs_edges(self.graph, min1)
transition_states = [ self.graph.get_edge_data(u, v)["ts"] for u, v in edges ]
if not check_thermodynamic_info(transition_states):
raise Exception("The thermodynamic information is not yet computed")
A = [min2]
B = [min1]
committors = compute_committors(transition_states, A, B, T=T)
def get_committor(m):
try:
return committors[m]
except KeyError:
return 1.
self.dg.color_by_value(get_committor)
self.redraw_disconnectivity_graph()
def _layout_by_committor(self, min1, min2, T=1.):
print "coloring by the probability that a trajectory gets to minimum", min1._id, "before", min2._id
# get a list of transition states in the same cluster as min1
edges = nx.bfs_edges(self.graph, min1)
transition_states = [ self.graph.get_edge_data(u, v)["ts"] for u, v in edges ]
if not check_thermodynamic_info(transition_states):
raise Exception("The thermodynamic information is not yet computed")
A = [min2]
B = [min1]
committors = compute_committors(transition_states, A, B, T=T)
print "maximum committor", max(committors.values())
print "minimum committor", min(committors.values())
print "number of committors near 1", len([v for v in committors.values() if v > 1.-1e-4])
print "number of committors equal to 1", len([v for v in committors.values() if v == 1.])
def get_committor(m):
try:
return committors[m]
except KeyError:
return 1.
self.dg.get_value = get_committor
self.dg._layout_x_axis(self.dg.tree_graph)
self.dg.color_by_value(get_committor)
self.redraw_disconnectivity_graph()
def _on_left_click_minimum(self, minimum):
print "you clicked on minimum with id", minimum._id, "and energy", minimum.energy
self.minimum_selected(minimum)
self._selected_minimum = minimum
self.ui.label_selected_minimum.setText("%g (%d)" % (minimum.energy, minimum._id))
def _on_right_click_minimum(self, minimum):
"""create a menu with the list of available actions"""
menu = QtGui.QMenu("list menu", parent=self)
action1 = LabelMinimumAction(minimum, parent=self)
menu.addAction(action1)
if self._selected_minimum is not None:
action2 = ColorPathAction(minimum, self._selected_minimum, parent=self)
menu.addAction(action2)
menu.addAction(ColorCommittorAction(minimum, self._selected_minimum, parent=self))
menu.addAction(LayoutByCommittorAction(minimum, self._selected_minimum, parent=self))
action3 = ColorMFPTAction(minimum, parent=self)
menu.addAction(action3)
menu.exec_(QtGui.QCursor.pos())
def _on_pick_minimum(self, event):
"""matplotlib event called when a minimum is clicked on"""
if event.artist != self._minima_points:
# print "you clicked on something other than a node"
return True
ind = event.ind[0]
min1 = self._minima_list[ind]
if event.mouseevent.button == 3:
self._on_right_click_minimum(min1)
else:
self._on_left_click_minimum(min1)
def _draw_disconnectivity_graph(self, show_minima=True, show_trees=False):
ax = self.canvas.axes
ax.clear()
ax.hold(True)
dg = self.dg
# plot the lines and set up the rest of the plot using the built in function
# this might change some of the minima x positions, so this has to go before
# anything dependent on those positions
dg.plot(axes=ax, show_minima=False, linewidth=self.line_width,
title=self.title)
if len(self._minima_labels) > 0:
dg.label_minima(self._minima_labels, axes=ax)
self.ui.widget.canvas.fig.tight_layout()
# if show_trees
if self._tree_cid is not None:
self.canvas.mpl_disconnect(self._tree_cid)
self._tree_cid = None
if show_trees:
# draw the nodes
tree_list, x_pos, energies = self._get_tree_layout(dg.tree_graph)
treepoints = ax.scatter(x_pos, energies, picker=5, color='red', alpha=0.5)
self._treepoints = treepoints
self._tree_list = tree_list
# def on_pick_tree(event):
# if event.artist != treepoints:
# # print "you clicked on something other than a node"
# return True
# ind = event.ind[0]
# self.tree_selected = tree_list[ind]
# print "tree clicked on", self.tree_selected
#
# color_dialog = QColorDialog(parent=self)
# color_dialog.exec_()
# color = color_dialog.selectedColor()
# rgba = color.getRgbF() # red green blue alpha
# print "color", rgba
# rgb = rgba[:3]
# for tree in self.tree_selected.get_all_trees():
# tree.data["colour"] = rgb
self._tree_cid = self.canvas.mpl_connect('pick_event', self._on_pick_tree)
#draw minima as points and make them interactive
if self._minima_cid is not None:
self.canvas.mpl_disconnect(self._minima_cid)
self._minima_cid = None
if show_minima:
xpos, minima = dg.get_minima_layout()
energies = [m.energy for m in minima]
self._minima_points = ax.scatter(xpos, energies, picker=5)
self._minima_list = minima
# def on_pick_min(event):
# if event.artist != points:
# # print "you clicked on something other than a node"
# return True
# ind = event.ind[0]
# min1 = minima[ind]
# print "you clicked on minimum with id", min1._id, "and energy", min1.energy
# self.minimum_selected(min1)
self._minima_cid = self.canvas.mpl_connect('pick_event', self._on_pick_minimum)
self.canvas.draw()
class DGraphDialog(QtGui.QMainWindow):
def __init__(self, database, graph=None, params=None, parent=None, app=None):
if not params: params = {}
super(DGraphDialog, self).__init__(parent=parent)
self.setWindowTitle("Disconnectivity graph")
self.dgraph_widget = DGraphWidget(database, graph, params, parent=self)
self.setCentralWidget(self.dgraph_widget)
def rebuild_disconnectivity_graph(self):
self.dgraph_widget.rebuild_disconnectivity_graph()
def reduced_db2graph(db, Emax):
"""
    make a networkx graph from a database including only transition states with energy <= Emax
"""
from pele.storage.database import Minimum
g = nx.Graph()
# js850> It's not strictly necessary to add the minima explicitly here,
# but for some reason it is much faster if you do (factor of 2). Even
# if this means there are many more minima in the graph. I'm not sure
# why this is. This step is already often the bottleneck of the d-graph
# calculation.
minima = db.session.query(Minimum).filter(Minimum.energy <= Emax)
g.add_nodes_from(minima)
    # by adding the transition states in order of decreasing energy, the smallest energy
    # transition state is the one kept in the case of duplicates (later edges overwrite earlier ones)
ts = db.session.query(TransitionState).filter(TransitionState.energy <= Emax)\
.order_by(-TransitionState.energy)
for t in ts:
g.add_edge(t.minimum1, t.minimum2, ts=t)
return g
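def _example_reduced_graph_usage(db, Emax):
    """Hedged usage sketch, not part of the original module: build the reduced graph
    and feed it straight to DisconnectivityGraph (the nlevels value here is an
    arbitrary assumption)."""
    graph = reduced_db2graph(db, Emax)
    dg = DisconnectivityGraph(graph, nlevels=20)
    dg.calculate()
    return dg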
if __name__ == "__main__":
db = Database("lj31.db", createdb=False)
if len(db.minima()) < 2:
raise Exception("database has no minima")
if True:
from pele.systems import LJCluster
from pele.thermodynamics import get_thermodynamic_information
system = LJCluster(31)
get_thermodynamic_information(system, db, nproc=10)
app = QApplication(sys.argv)
md = DGraphDialog(db)
md.show()
md.rebuild_disconnectivity_graph()
sys.exit(app.exec_())
| gpl-3.0 |
kedz/cuttsum | trec2015/cuttsum/l2s/_base.py | 1 | 6288 | import pyvw
import numpy as np
import pandas as pd
class _SearchBase(pyvw.SearchTask):
def __init__(self, vw, sch, num_actions):
pyvw.SearchTask.__init__(self, vw, sch, num_actions)
sch.set_options( sch.IS_LDF ) # | sch.AUTO_CONDITION_FEATURES )
self._with_scores = False
def list_namespaces(self):
return ["a", "b", "c", "d", "e", "f", "g", "n"]
def get_example_score(self, ex):
w_sum = 0
for ns in self.list_namespaces():
w_sq = 0
for i in xrange(ex.num_features_in(ns)):
w = self.vw.get_weight(ex.feature(ns, i)) * ex.feature_weight(ns, i)
w_sum += w
return w_sum
def get_namespace_scores(self, ex, ns_map=None):
scores = {}
for ns in self.list_namespaces():
score = 0
for i in xrange(ex.num_features_in(ns)):
score += self.vw.get_weight(ex.feature(ns, i)) * ex.feature_weight(ns, i)
if ns_map is None:
scores[ns] = score
else:
scores[ns_map(ns)] = score
return scores
def make_select_example(self, sent, sents, df, cache):
pass
def make_next_example(self, sents, df, cache, is_oracle):
pass
def setup_cache(self):
pass
def update_cache(self, pred, sents, df, cache):
pass
def get_feature_weights(self, dataframes):
pass
def predict_with_scores(self, instance):
self._with_scores = True
seq, df = self.predict(instance)
self._with_scores = False
return seq, df
def _run(self, (event, docs)):
nuggets = set()
cache = self.setup_cache()
output = []
n = 0
loss = 0
if self._with_scores is True:
score_data = []
for doc in docs:
sents = range(len(doc))
while 1:
n += 1
# Create some examples, one for each sentence.
examples = [self.make_select_example(sent, sents, doc, cache)
for sent in sents]
#examples = [ex() for ex in examples]
# Create a final example for the option "next document".
# This example has the feature "all_clear" if the max gain
# of adding any current sentence to the summary is 0.
# Otherwise the feature "stay" is active.
gain = doc.iloc[sents]["nuggets"].apply(
lambda x: len(x.difference(nuggets))).values
# Compute oracle. If max gain > 0, oracle always picks the
# sentence with the max gain. Else, oracle picks the last
# example which is the "next document" option.
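                # Illustrative note (not from the original code): with gain = [0, 2, 1] the
                # oracle is sentence index 1; with gain = [0, 0, 0] the oracle becomes
                # len(sents), i.e. the trailing "next document" example.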
if len(sents) > 0:
oracle = np.argmax(gain)
oracle_gain = gain[oracle]
if oracle_gain == 0:
oracle = len(sents)
else:
oracle = 0
oracle_gain = 0
oracle_is_next = oracle == len(sents)
next_ex = self.make_next_example(
sents, doc, cache, oracle_is_next)
examples.append(next_ex)
# Make prediction.
pred = self.sch.predict(
examples=examples,
my_tag=n,
oracle=oracle,
condition=[], # (n-1, "p"), ])
)
output.append(pred)
if oracle_is_next:
if pred != oracle:
loss += 1
else:
if pred < len(sents):
loss += (oracle_gain - gain[pred])
else:
missed = set([nug for ns in doc.iloc[sents]["nuggets"].tolist()
for nug in ns if nug not in nuggets])
loss += len(missed)
if self._with_scores is True:
scores = np.array([self.get_example_score(ex)
for ex in examples])
ns_scores = self.get_namespace_scores(examples[pred], ns_map=lambda x: "l_" + x)
o_ns_scores = self.get_namespace_scores(examples[oracle], ns_map=lambda x: "o_" + x)
if scores.shape[0] > 1:
select_scores = scores[0:-1]
max_select = np.max(select_scores)
min_select = np.min(select_scores)
avg_select = np.mean(select_scores)
med_select = np.median(select_scores)
else:
max_select = 0
min_select = 0
avg_select = 0
med_select = 0
score_data.append({
"max select score": max_select,
"min select score": min_select,
"avg select score": avg_select,
"median select score": med_select,
"next score": scores[-1],
})
score_data[-1].update(ns_scores)
score_data[-1].update(o_ns_scores)
assert np.min(scores) == scores[pred]
if pred < len(sents):
cache = self.update_cache(pred, sents, doc, cache)
nuggets.update(doc.iloc[pred]["nuggets"])
del sents[pred]
elif pred == len(sents):
break
self.sch.loss(loss)
if self._with_scores:
return output, pd.DataFrame(score_data,
columns=["min select score", "max select score",
"avg select score", "median select score",
"next score"] + map(lambda x: "l_" + x, self.list_namespaces()) + map(lambda x: "o_" + x, self.list_namespaces()))
else:
return output
| apache-2.0 |
napsternxg/SemEval_Twitter_Data | PrepareData.py | 1 | 2233 |
# coding: utf-8
# In[1]:
import pandas as pd
import json
import fnmatch
import os
# In[2]:
TWEET_DATA_FILE = "TWEET_DATA.json"
LABELS_BASE_DIR = "data/2download"
OUTPUT_DIR = "data/processed"
FILENAMES = []
INPUT_FILES = []
for root, dirnames, filenames in os.walk(LABELS_BASE_DIR):
for filename in fnmatch.filter(filenames, '*subtask*.txt'):
FILENAMES.append(filename)
INPUT_FILES.append(os.path.join(root, filename))
print INPUT_FILES
print FILENAMES
# In[3]:
tweet_data = json.load(open(TWEET_DATA_FILE))
print len(tweet_data)
# In[4]:
df = pd.read_csv(INPUT_FILES[0], sep="\t", header=None)
# In[5]:
cols = df.columns.tolist()
cols[0] = "tid"
df.columns = cols
df.head()
# In[6]:
def add_tweet(x):
x = "%s" % x
try:
return tweet_data.get(x, {"text": "Not Available"})["text"].replace("\n", " ").replace("\r", " ")
except:
print x
raise
# In[7]:
df["text"] = df["tid"].apply(add_tweet)
df.head()
# In[8]:
df.to_csv("%s/%s" % (OUTPUT_DIR, FILENAMES[0]), sep="\t", header=None, index=False)
# In[23]:
def append_tweets(input_file, output_file):
df = pd.read_csv(input_file, sep="\t", header=None)
cols = df.columns.tolist()
cols[0] = "tid"
df.columns = cols
df["text"] = df["tid"].apply(add_tweet)
df.to_csv(output_file, sep="\t", header=None, index=False)
print "Wrote dataframe with shape: ", df.shape
# In[10]:
append_tweets(INPUT_FILES[0], "%s/%s" % (OUTPUT_DIR, FILENAMES[0]))
# In[24]:
for input_file, output_file in zip(INPUT_FILES, ["%s/%s" % (OUTPUT_DIR, k) for k in FILENAMES]):
print "Processing %s, saving to %s" % (input_file, output_file)
append_tweets(input_file, output_file)
# In[25]:
len(INPUT_FILES), len(FILENAMES)
# In[26]:
len(zip(INPUT_FILES, ["%s/%s" % (OUTPUT_DIR, k) for k in FILENAMES]))
# In[27]:
get_ipython().run_cell_magic(u'bash', u'', u'cd data/processed/\nmkdir -p gold/{dev,devtest,train} input/devtest\nmv *.dev.gold.txt gold/dev\nmv *.devtest.gold.txt gold/devtest/\nmv *.train.gold.txt gold/train/\nmv *.devtest.input.txt input/devtest/\nls')
# In[28]:
get_ipython().run_cell_magic(u'bash', u'', u'cd data/processed/\nfind ./ -name "*.txt"')
# In[ ]:
| gpl-2.0 |
GiulioGx/RNNs | sources/plotUtils/docs_plots/rate_success_temporal.py | 1 | 1217 | import numpy
import matplotlib.pyplot as plt
import sys
from matplotlib.ticker import FormatStrFormatter
from plotUtils.plt_utils import save_multiple_formats
__author__ = 'giulio'
"""This script plots train and validation losses for models trained with different number of hidden units"""
lengths = [10, 20, 50, 100, 150, 200]
rates_rho = [100, 100, 100, 66, 66, 0]
rates_old = [100, 100, 0, 0, 0, 0]
plt.plot(lengths, rates_rho, '--o', color='b', linewidth=1)
plt.plot(lengths, rates_old, '--o', color='r', linewidth=1)
plt.legend(["SGD-C rho>1","SGD-C rho<1"], shadow=True, fancybox=True)
# plt.yscale('log')
# plt.ylim(ymin=4, ymax=13)
plt.xticks(lengths)
plt.xlim(xmin = 0, xmax = 220)
plt.yticks(numpy.arange(11)*10)
plt.ylim(ymin=-10, ymax = 110)
# plt.legend(legends, shadow=True, fancybox=True)
plt.xlabel('lengths')
plt.ylabel('rate of success')
# ax = plt.gca()
# ax.xaxis.set_major_formatter(FormatStrFormatter("%.0e"))
# formatter = FormatStrFormatter("%.1f")
# # formatter.set_useOffset(True)
# ax.yaxis.set_major_formatter(formatter)
# # ax.xaxis.get_major_formatter().set_useOffset(False)
filename = sys.argv[0]
save_multiple_formats(filename)
ax = plt.gca()
ax.set_xmargin(1)
plt.show()
| lgpl-3.0 |
aminert/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset in chunks. The way we proceed is that we load one image at a time
and randomly extract 50 patches from it. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
`partial_fit`. This is because the number of patches that they represent
has become too low, and it is better to choose a new random
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
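# Hedged follow-up, not part of the original example: the fitted centroids can also be
# used to quantize fresh patches. MiniBatchKMeans.predict returns, for each patch, the
# index of the closest learned dictionary atom; the centering/scaling mirrors the
# training loop above.
sample = extract_patches_2d(faces.images[0], patch_size, max_patches=50,
                            random_state=rng)
sample = np.reshape(sample, (len(sample), -1))
sample = sample - np.mean(sample, axis=0)
sample = sample / np.std(sample, axis=0)
print('First few dictionary-atom assignments: %s' % kmeans.predict(sample)[:10])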
| bsd-3-clause |
marionleborgne/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtkcairo.py | 69 | 2207 | """
GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
Author: Steve Chaplin
"""
import gtk
if gtk.pygtk_version < (2,7,0):
import cairo.gtk
from matplotlib.backends import backend_cairo
from matplotlib.backends.backend_gtk import *
backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \
'Pycairo(%s)' % backend_cairo.backend_version
_debug = False
#_debug = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if _debug: print 'backend_gtkcairo.%s()' % fn_name()
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTKCairo(thisFig)
return FigureManagerGTK(canvas, num)
class RendererGTKCairo (backend_cairo.RendererCairo):
if gtk.pygtk_version >= (2,7,0):
def set_pixmap (self, pixmap):
self.ctx = pixmap.cairo_create()
self.ctx.save() # restore, save - when call new_gc()
else:
def set_pixmap (self, pixmap):
self.ctx = cairo.gtk.gdk_cairo_create (pixmap)
self.ctx.save() # restore, save - when call new_gc()
class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)
def _renderer_init(self):
"""Override to use cairo (rather than GDK) renderer"""
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self._renderer = RendererGTKCairo (self.figure.dpi)
class FigureManagerGTKCairo(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKCairo (canvas, self.window)
else:
toolbar = None
return toolbar
class NavigationToolbar2Cairo(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKCairo(fig)
| agpl-3.0 |
DeepSpace2/StyleFrame | styleframe/tests/style_frame_tests.py | 1 | 32657 | import unittest
import pandas as pd
from pandas.testing import assert_frame_equal
import os
from functools import partial
from styleframe import Container, StyleFrame, Styler, utils
from styleframe.tests import TEST_FILENAME
class StyleFrameTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.default_styler_obj = Styler(wrap_text=False)
cls.styler_obj_1 = Styler(bg_color=utils.colors.blue, bold=True, font='Impact', font_color=utils.colors.yellow,
font_size=20.0, underline=utils.underline.single,
horizontal_alignment=utils.horizontal_alignments.left,
vertical_alignment=utils.vertical_alignments.center,
comment_author='Author 1', comment_text='styler_obj_1 comment')
cls.styler_obj_2 = Styler(bg_color=utils.colors.yellow,
comment_author='Author 2', comment_text='styler_obj_2 comment')
cls.openpy_style_obj_1 = cls.styler_obj_1.to_openpyxl_style()._style
cls.openpy_style_obj_2 = cls.styler_obj_2.to_openpyxl_style()._style
def setUp(self):
self.ew = StyleFrame.ExcelWriter(TEST_FILENAME)
self.sf = StyleFrame({'a': ['col_a_row_1', 'col_a_row_2', 'col_a_row_3'],
'b': ['col_b_row_1', 'col_b_row_2', 'col_b_row_3']}, self.default_styler_obj)
self.apply_column_style = partial(self.sf.apply_column_style, styler_obj=self.styler_obj_1, width=10)
self.apply_style_by_indexes = partial(self.sf.apply_style_by_indexes, styler_obj=self.styler_obj_1, height=10)
self.apply_headers_style = partial(self.sf.apply_headers_style, styler_obj=self.styler_obj_1)
@classmethod
def tearDownClass(cls):
try:
os.remove(TEST_FILENAME)
except OSError as ex:
print(ex)
def export_and_get_default_sheet(self, save=False):
self.sf.to_excel(excel_writer=self.ew, right_to_left=True, columns_to_hide=self.sf.columns[0],
row_to_add_filters=0, columns_and_rows_to_freeze='A2', allow_protection=True)
if save:
self.ew.save()
return self.ew.sheets['Sheet1']
def get_cf_rules(self, sheet):
conditional_formatting = sheet.conditional_formatting
try:
return conditional_formatting.cf_rules
except AttributeError:
return conditional_formatting
def test_init_styler_obj(self):
self.sf = StyleFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, styler_obj=self.styler_obj_1)
self.assertTrue(all(self.sf.at[index, 'a'].style.to_openpyxl_style()._style == self.openpy_style_obj_1
for index in self.sf.index))
sheet = self.export_and_get_default_sheet()
self.assertTrue(all(sheet.cell(row=i, column=j)._style == self.openpy_style_obj_1
for i in range(2, len(self.sf))
for j in range(1, len(self.sf.columns))))
with self.assertRaises(TypeError):
StyleFrame({}, styler_obj=1)
def test_init_dataframe(self):
self.assertIsInstance(StyleFrame(pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})), StyleFrame)
self.assertIsInstance(StyleFrame(pd.DataFrame()), StyleFrame)
def test_init_styleframe(self):
self.assertIsInstance(StyleFrame(StyleFrame({'a': [1, 2, 3]})), StyleFrame)
with self.assertRaises(TypeError):
StyleFrame({}, styler_obj=1)
def test_len(self):
self.assertEqual(len(self.sf), len(self.sf.data_df))
self.assertEqual(len(self.sf), 3)
def test_str(self):
self.assertEqual(str(self.sf), str(self.sf.data_df))
def test__getitem__(self):
self.assertEqual(self.sf['a'].tolist(), self.sf.data_df['a'].tolist())
self.assertTrue(self.sf.data_df[['a', 'b']].equals(self.sf[['a', 'b']].data_df))
def test__setitem__(self):
self.sf['a'] = range(3)
self.sf['b'] = range(3, 6)
self.sf['c'] = 5
self.sf['d'] = self.sf['a'] + self.sf['b']
self.sf['e'] = self.sf['a'] + 5
self.assertTrue(all(self.sf.applymap(lambda x: isinstance(x, Container)).all()))
def test__getattr__(self):
self.assertEqual(self.sf.fillna, self.sf.data_df.fillna)
self.assertTrue(self.sf['a'].equals(self.sf.a))
with self.assertRaises(AttributeError):
self.sf.non_exisiting_method()
def test_apply_column_style(self):
# testing some edge cases
with self.assertRaises(TypeError):
# noinspection PyTypeChecker
self.sf.apply_column_style(cols_to_style='a', styler_obj=0)
with self.assertRaises(KeyError):
self.sf.apply_column_style(cols_to_style='non_existing_col', styler_obj=Styler())
# actual tests
self.apply_column_style(cols_to_style=['a'])
self.assertTrue(all([self.sf.at[index, 'a'].style.to_openpyxl_style()._style == self.openpy_style_obj_1
and self.sf.at[index, 'b'].style.to_openpyxl_style()._style != self.openpy_style_obj_1
for index in self.sf.index]))
sheet = self.export_and_get_default_sheet()
self.assertEqual(sheet.column_dimensions['A'].width, 10)
# range starts from 2 since we don't want to check the header's style
self.assertTrue(all(sheet.cell(row=i, column=1)._style == self.openpy_style_obj_1 for i in range(2, len(self.sf))))
def test_apply_column_style_no_override_default_style(self):
# testing some edge cases
with self.assertRaises(TypeError):
# noinspection PyTypeChecker
self.sf.apply_column_style(cols_to_style='a', styler_obj=0)
with self.assertRaises(KeyError):
self.sf.apply_column_style(cols_to_style='non_existing_col', styler_obj=Styler())
# actual tests
self.apply_column_style(cols_to_style=['a'], overwrite_default_style=False)
self.assertTrue(all([self.sf.at[index, 'a'].style == Styler.combine(self.default_styler_obj, self.styler_obj_1)
and self.sf.at[index, 'b'].style == self.default_styler_obj
for index in self.sf.index]))
sheet = self.export_and_get_default_sheet()
self.assertEqual(sheet.column_dimensions['A'].width, 10)
# range starts from 2 since we don't want to check the header's style
self.assertTrue(all(sheet.cell(row=i, column=1)._style == Styler.combine(self.default_styler_obj, self.styler_obj_1).to_openpyxl_style()._style
for i in range(2, len(self.sf))))
def test_apply_style_by_indexes_single_col(self):
with self.assertRaises(TypeError):
# noinspection PyTypeChecker
self.sf.apply_style_by_indexes(indexes_to_style=0, styler_obj=0)
self.apply_style_by_indexes(self.sf[self.sf['a'] == 'col_a_row_2'], cols_to_style=['a'])
self.assertTrue(all(self.sf.at[index, 'a'].style.to_openpyxl_style()._style == self.openpy_style_obj_1
for index in self.sf.index if self.sf.at[index, 'a'] == 'col_a_row_2'))
sheet = self.export_and_get_default_sheet()
self.assertTrue(all(sheet.cell(row=i, column=1)._style == self.openpy_style_obj_1 for i in range(1, len(self.sf))
if sheet.cell(row=i, column=1).value == 2))
self.assertEqual(sheet.row_dimensions[3].height, 10)
def test_apply_style_by_indexes_all_cols(self):
self.apply_style_by_indexes(self.sf[self.sf['a'] == 2])
self.assertTrue(all(self.sf.at[index, 'a'].style.to_openpyxl_style()._style == self.openpy_style_obj_1
for index in self.sf.index if self.sf.at[index, 'a'] == 2))
sheet = self.export_and_get_default_sheet()
self.assertTrue(all(sheet.cell(row=i, column=j)._style == self.openpy_style_obj_1
for i in range(1, len(self.sf))
for j in range(1, len(self.sf.columns))
if sheet.cell(row=i, column=1).value == 2))
def test_apply_style_by_indexes_complement_style(self):
self.apply_style_by_indexes(self.sf[self.sf['a'] == 'col_a_row_1'], complement_style=self.styler_obj_2)
self.assertTrue(all(self.sf.at[index, 'a'].style.to_openpyxl_style()._style == self.openpy_style_obj_1
for index in self.sf.index if self.sf.at[index, 'a'] == 'col_a_row_1'))
self.assertTrue(all(self.sf.at[index, 'a'].style.to_openpyxl_style()._style == self.openpy_style_obj_2
for index in self.sf.index if self.sf.at[index, 'a'] != 'col_a_row_1'))
def test_apply_style_by_indexes_with_single_index(self):
self.apply_style_by_indexes(self.sf.index[0])
self.assertTrue(all(self.sf.iloc[0, self.sf.columns.get_loc(col)].style.to_openpyxl_style()._style == self.openpy_style_obj_1
for col in self.sf.columns))
sheet = self.export_and_get_default_sheet()
        # row=2 since the sheet starts from row 1 and the headers are row 1
self.assertTrue(all(sheet.cell(row=2, column=col)._style == self.openpy_style_obj_1
for col in range(1, len(self.sf.columns))))
def test_apply_style_by_indexes_all_cols_with_multiple_indexes(self):
self.apply_style_by_indexes([1, 2])
self.assertTrue(all(self.sf.iloc[index, self.sf.columns.get_loc(col)].style.to_openpyxl_style()._style == self.openpy_style_obj_1
for index in [1, 2]
for col in self.sf.columns))
sheet = self.export_and_get_default_sheet()
self.assertTrue(all(sheet.cell(row=i, column=j)._style == self.openpy_style_obj_1
                            for i in [3, 4]  # sheet starts from row 1 and headers are row 1
for j in range(1, len(self.sf.columns))))
def test_apply_headers_style(self):
self.apply_headers_style()
self.assertEqual(self.sf.columns[0].style.to_openpyxl_style()._style, self.openpy_style_obj_1)
sheet = self.export_and_get_default_sheet()
self.assertEqual(sheet.cell(row=1, column=1)._style, self.openpy_style_obj_1)
def test_set_column_width(self):
# testing some edge cases
with self.assertRaises(TypeError):
# noinspection PyTypeChecker
self.sf.set_column_width(columns='a', width='a')
with self.assertRaises(ValueError):
# noinspection PyTypeChecker
self.sf.set_column_width(columns='a', width=-1)
# actual tests
self.sf.set_column_width(columns=['a'], width=20)
self.assertEqual(self.sf._columns_width['a'], 20)
sheet = self.export_and_get_default_sheet()
self.assertEqual(sheet.column_dimensions['A'].width, 20)
def test_set_column_width_dict(self):
with self.assertRaises(TypeError):
# noinspection PyTypeChecker
self.sf.set_column_width_dict(None)
width_dict = {'a': 20, 'b': 30}
self.sf.set_column_width_dict(width_dict)
self.assertEqual(self.sf._columns_width, width_dict)
sheet = self.export_and_get_default_sheet()
self.assertTrue(all(sheet.column_dimensions[col.upper()].width == width_dict[col]
for col in width_dict))
def test_set_row_height(self):
# testing some edge cases
with self.assertRaises(TypeError):
self.sf.set_row_height(rows=[1], height='a')
with self.assertRaises(ValueError):
self.sf.set_row_height(rows=[1], height=-1)
with self.assertRaises(ValueError):
self.sf.set_row_height(rows=['a'], height=-1)
# actual tests
self.sf.set_row_height(rows=[1], height=20)
self.assertEqual(self.sf._rows_height[1], 20)
sheet = self.export_and_get_default_sheet()
self.assertEqual(sheet.row_dimensions[1].height, 20)
def test_set_row_height_dict(self):
with self.assertRaises(TypeError):
# noinspection PyTypeChecker
self.sf.set_row_height_dict(None)
height_dict = {1: 20, 2: 30}
self.sf.set_row_height_dict(height_dict)
self.assertEqual(self.sf._rows_height, height_dict)
sheet = self.export_and_get_default_sheet()
self.assertTrue(all(sheet.row_dimensions[row].height == height_dict[row]
for row in height_dict))
def test_rename(self):
with self.assertRaises(TypeError):
# noinspection PyTypeChecker
self.sf.rename(columns=None)
original_columns_name = list(self.sf.columns)
names_dict = {'a': 'A', 'b': 'B'}
# testing rename with inplace = True
self.sf.rename(columns=names_dict, inplace=True)
self.assertTrue(all(new_col_name in self.sf.columns
for new_col_name in names_dict.values()))
new_columns_name = list(self.sf.columns)
# check that the columns order did not change after renaming
self.assertTrue(all(original_columns_name.index(old_col_name) == new_columns_name.index(new_col_name)
for old_col_name, new_col_name in names_dict.items()))
# using the old name should raise a KeyError
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
self.sf['a']
# testing rename with inplace = False
names_dict = {v: k for k, v in names_dict.items()}
new_sf = self.sf.rename(columns=names_dict, inplace=False)
self.assertTrue(all(new_col_name in new_sf.columns
for new_col_name in names_dict.values()))
# using the old name should raise a KeyError
with self.assertRaises(KeyError):
# noinspection PyStatementEffect
new_sf['A']
def test_read_excel_no_style(self):
self.export_and_get_default_sheet(save=True)
sf_from_excel = StyleFrame.read_excel(TEST_FILENAME)
# making sure content is the same
self.assertTrue(all(list(self.sf[col]) == list(sf_from_excel[col]) for col in self.sf.columns))
def test_read_excel_with_string_sheet_name(self):
self.export_and_get_default_sheet(save=True)
sf_from_excel = StyleFrame.read_excel(TEST_FILENAME, read_style=True, sheet_name='Sheet1',
use_openpyxl_styles=True)
# making sure content is the same
self.assertTrue(all(list(self.sf[col]) == list(sf_from_excel[col]) for col in self.sf.columns))
rows_in_excel = sf_from_excel.data_df.itertuples()
rows_in_self = self.sf.data_df.itertuples()
# making sure styles are the same
self.assertTrue(all(self_cell.style == Styler.from_openpyxl_style(excel_cell.style, [])
for row_in_excel, row_in_self in zip(rows_in_excel, rows_in_self)
for excel_cell, self_cell in zip(row_in_excel[1:], row_in_self[1:])))
def test_read_excel_with_style_openpyxl_objects(self):
self.export_and_get_default_sheet(save=True)
sf_from_excel = StyleFrame.read_excel(TEST_FILENAME, read_style=True, use_openpyxl_styles=True)
# making sure content is the same
self.assertTrue(all(list(self.sf[col]) == list(sf_from_excel[col]) for col in self.sf.columns))
rows_in_excel = sf_from_excel.data_df.itertuples()
rows_in_self = self.sf.data_df.itertuples()
# making sure styles are the same
self.assertTrue(all(self_cell.style == Styler.from_openpyxl_style(excel_cell.style, [])
for row_in_excel, row_in_self in zip(rows_in_excel, rows_in_self)
for excel_cell, self_cell in zip(row_in_excel[1:], row_in_self[1:])))
def test_read_excel_with_style_openpyxl_objects_and_save(self):
self.export_and_get_default_sheet(save=True)
sf_from_excel = StyleFrame.read_excel(TEST_FILENAME, read_style=True, use_openpyxl_styles=True)
# making sure content is the same
self.assertTrue(all(list(self.sf[col]) == list(sf_from_excel[col]) for col in self.sf.columns))
rows_in_excel = sf_from_excel.data_df.itertuples()
rows_in_self = self.sf.data_df.itertuples()
# making sure styles are the same
self.assertTrue(all(self_cell.style == Styler.from_openpyxl_style(excel_cell.style, [])
for row_in_excel, row_in_self in zip(rows_in_excel, rows_in_self)
for excel_cell, self_cell in zip(row_in_excel[1:], row_in_self[1:])))
sf_from_excel.to_excel(TEST_FILENAME).save()
def test_read_excel_with_style_styler_objects(self):
self.export_and_get_default_sheet(save=True)
sf_from_excel = StyleFrame.read_excel(TEST_FILENAME, read_style=True)
# making sure content is the same
self.assertTrue(all(list(self.sf[col]) == list(sf_from_excel[col]) for col in self.sf.columns))
rows_in_excel = sf_from_excel.data_df.itertuples()
rows_in_self = self.sf.data_df.itertuples()
# making sure styles are the same
self.assertTrue(all(excel_cell.style == self_cell.style
for row_in_excel, row_in_self in zip(rows_in_excel, rows_in_self)
for excel_cell, self_cell in zip(row_in_excel[1:], row_in_self[1:])))
def test_read_excel_with_style_comments_openpyxl_objects(self):
self.export_and_get_default_sheet(save=True)
sf_from_excel = StyleFrame.read_excel(TEST_FILENAME, read_style=True, read_comments=True,
use_openpyxl_styles=True)
# making sure content is the same
self.assertTrue(all(list(self.sf[col]) == list(sf_from_excel[col]) for col in self.sf.columns))
rows_in_excel = sf_from_excel.data_df.itertuples()
rows_in_self = self.sf.data_df.itertuples()
# making sure styles are the same
self.assertTrue(all(self_cell.style == Styler.from_openpyxl_style(excel_cell.style, [])
for row_in_excel, row_in_self in zip(rows_in_excel, rows_in_self)
for excel_cell, self_cell in zip(row_in_excel[1:], row_in_self[1:])))
def test_read_excel_with_style_comments_styler_objects(self):
self.export_and_get_default_sheet(save=True)
sf_from_excel = StyleFrame.read_excel(TEST_FILENAME, read_style=True, read_comments=True)
# making sure content is the same
self.assertTrue(all(list(self.sf[col]) == list(sf_from_excel[col]) for col in self.sf.columns))
rows_in_excel = sf_from_excel.data_df.itertuples()
rows_in_self = self.sf.data_df.itertuples()
# making sure styles are the same
self.assertTrue(all(excel_cell.style == self_cell.style
for row_in_excel, row_in_self in zip(rows_in_excel, rows_in_self)
for excel_cell, self_cell in zip(row_in_excel[1:], row_in_self[1:])))
def test_read_excel_with_style_header_arg_none(self):
self.sf = StyleFrame({0: ['A1', 'A2', 'A3', 'A4', 'A5']})
self.sf.apply_style_by_indexes(self.sf[self.sf.data_df[0].isin(('A2', 'A5'))], Styler(bold=True))
self.export_and_get_default_sheet(save=True)
sf_from_excel = StyleFrame.read_excel(TEST_FILENAME, read_style=True, header=None)
# making sure content is the same
self.assertTrue(all(list(self.sf[col]) == list(sf_from_excel[col])[1:] for col in self.sf.columns))
rows_in_excel = list(sf_from_excel.data_df.itertuples())
rows_in_self = self.sf.data_df.itertuples()
# making sure styles are the same
self.assertTrue(all(excel_cell.style == self_cell.style
for row_in_excel, row_in_self in zip(rows_in_excel[1:], rows_in_self)
for excel_cell, self_cell in zip(row_in_excel[1:], row_in_self[1:])))
def test_read_excel_rows_height(self):
self.sf.set_row_height(rows=1, height=25)
self.sf.set_row_height(rows=2, height=15)
self.export_and_get_default_sheet(save=True)
sf_from_excel = StyleFrame.read_excel(TEST_FILENAME, read_style=True)
# Assert the number of rows with height is the length of our data plus 1 for headers row
self.assertEqual(len(sf_from_excel._rows_height), len(self.sf) + 1)
self.assertEqual(sf_from_excel._rows_height[1], 25)
self.assertEqual(sf_from_excel._rows_height[2], 15)
self.assertEqual(sf_from_excel._rows_height[3], None)
self.assertEqual(sf_from_excel._rows_height[4], None)
def test_read_excel_columns_width(self):
self.sf.set_column_width(columns='a', width=25)
self.sf.set_column_width(columns='b', width=15)
self.export_and_get_default_sheet(save=True)
sf_from_excel = StyleFrame.read_excel(TEST_FILENAME, read_style=True)
        # Assert the number of columns with a width equals the number of columns in our data
self.assertEqual(len(sf_from_excel._columns_width), len(self.sf.columns))
self.assertEqual(sf_from_excel._columns_width['a'], 25)
self.assertEqual(sf_from_excel._columns_width['b'], 15)
def test_read_excel_template_equal_boundaries(self):
template_sf = StyleFrame(
obj={
'a': ['col_a_row_1', 'col_a_row_2', 'col_a_row_3'],
'b': ['col_b_row_1', 'col_b_row_2', 'col_b_row_3']
},
styler_obj=self.styler_obj_1
)
template_sf.index[0].style = self.styler_obj_2
template_sf.to_excel(TEST_FILENAME, index=True).save()
df = pd.DataFrame(
data={
'A': [1, 2, 3],
'B': [3, 2, 1]
},
columns=['A', 'B']
)
sf_from_template = StyleFrame.read_excel_as_template(path=TEST_FILENAME, df=df, use_df_boundaries=False,
index_col=0, read_comments=True)
for template_rows, sf_rows in zip(template_sf.data_df.itertuples(), sf_from_template.data_df.itertuples()):
for template_cell, actual_cell in zip(template_rows, sf_rows):
self.assertEqual(template_cell.style, actual_cell.style,
'Different styles in template cell {template_cell} with style {template_style}'
'\nand actual cell {actual_cell} with style {actual_cell_style}'.format(
template_cell=template_cell, template_style=template_cell.style,
actual_cell=actual_cell, actual_cell_style=actual_cell.style
))
        # Assert values are equal to df and not to the original values from the template
assert_frame_equal(sf_from_template.data_df, df,
check_index_type=False,
check_dtype=False,
check_column_type=False)
def test_read_excel_template_boundaries_with_more_rows_and_columns_than_df(self):
template_sf = StyleFrame(
obj={
'a': ['col_a_row_1', 'col_a_row_2'],
'b': ['col_b_row_1', 'col_b_row_2']
},
styler_obj=self.styler_obj_1
)
template_sf.to_excel(TEST_FILENAME).save()
df = pd.DataFrame(
data={
'A': [1],
},
columns=['A']
)
sf_from_template = StyleFrame.read_excel_as_template(path=TEST_FILENAME, df=df, use_df_boundaries=False,
read_comments=True)
        # Since the template is larger than the df and use_df_boundaries is False, column 'b' shouldn't change
        # and should be left as it is in the original template
self.assertListEqual([col.value for col in sf_from_template.columns], ['A', 'b'])
self.assertEqual(template_sf['a'][0].style, sf_from_template['A'][0].style,
'Different styles in template cell with style {template_style}'
'\nand actual cell with style {actual_cell_style}'.format(
template_style=template_sf['a'][0].style, actual_cell_style=sf_from_template['A'][0].style)
)
self.assertEqual(sf_from_template['A'][0].value, 1)
# Assert extra column equals
self.assertListEqual(list(sf_from_template['b']), list(template_sf['b']))
# Assert extra row exists and equals
self.assertListEqual(list(sf_from_template.iloc[1]), list(template_sf.iloc[1]))
def test_read_excel_template_boundaries_with_less_rows_and_columns_than_df(self):
template_sf = StyleFrame(
obj={
'a': ['col_a_row_1', 'col_a_row_2', 'col_a_row_3'],
'b': ['col_b_row_1', 'col_b_row_2', 'col_b_row_3']
},
styler_obj=self.styler_obj_1
)
template_sf.index[0].style = self.styler_obj_2
template_sf.to_excel(TEST_FILENAME, index=True).save()
df = pd.DataFrame(
data={
'A': [1, 2, 3, 4],
'B': [3, 2, 1, 4],
'C': [-1, -2, -3, -4],
},
columns=['A', 'B', 'C']
)
sf_from_template = StyleFrame.read_excel_as_template(path=TEST_FILENAME, df=df, use_df_boundaries=False,
index_col=0, read_comments=True)
for template_rows, sf_rows in zip(template_sf.data_df.itertuples(), sf_from_template.data_df.itertuples()):
for template_cell, actual_cell in zip(template_rows, sf_rows):
self.assertEqual(template_cell.style, actual_cell.style,
'Different styles in template cell {template_cell} with style {template_style}'
'\nand actual cell {actual_cell} with style {actual_cell_style}'.format(
template_cell=template_cell, template_style=template_cell.style,
actual_cell=actual_cell, actual_cell_style=actual_cell.style
))
        # Assert values are equal to df and not to the original values from the template
assert_frame_equal(sf_from_template.data_df, df,
check_index_type=False,
check_dtype=False,
check_column_type=False)
def test_read_excel_template_with_use_df_boundaries(self):
template_sf = StyleFrame(
obj={
'a': ['col_a_row_1', 'col_a_row_2'],
'b': ['col_b_row_1', 'col_b_row_2']
},
styler_obj=self.styler_obj_1
)
template_sf.to_excel(TEST_FILENAME).save()
df = pd.DataFrame(
data={
'A': [1],
},
columns=['A']
)
sf_from_template = StyleFrame.read_excel_as_template(path=TEST_FILENAME, df=df, use_df_boundaries=True,
read_comments=True)
self.assertListEqual([col.value for col in sf_from_template.columns], ['A'])
self.assertEqual(len(df), len(sf_from_template))
expected_cell_style = template_sf['a'][0].style
actual_cell_style = sf_from_template['A'][0].style
self.assertEqual(actual_cell_style, expected_cell_style,
'Different styles in template cell with style {template_style}'
'\nand actual cell with style {actual_cell_style}'.format(
template_style=expected_cell_style, actual_cell_style=actual_cell_style)
)
self.assertEqual(sf_from_template['A'][0].value, 1)
def test_row_indexes(self):
self.assertEqual(self.sf.row_indexes, (1, 2, 3, 4))
def test_style_alternate_rows(self):
styles = [self.styler_obj_1, self.styler_obj_2]
openpy_styles = [self.openpy_style_obj_1, self.openpy_style_obj_2]
self.sf.style_alternate_rows(styles)
self.assertTrue(all(self.sf.iloc[index.value, 0].style.to_openpyxl_style() == styles[index.value % len(styles)].to_openpyxl_style()
for index in self.sf.index))
sheet = self.export_and_get_default_sheet()
        # sheet rows start at 1 and row 1 holds the headers, so add 2 when iterating over the data
self.assertTrue(all(sheet.cell(row=i.value + 2, column=1)._style == openpy_styles[i.value % len(styles)]
for i in self.sf.index))
def test_add_color_scale_conditional_formatting_start_end(self):
self.sf.add_color_scale_conditional_formatting(start_type=utils.conditional_formatting_types.percentile,
start_value=0, start_color=utils.colors.red,
end_type=utils.conditional_formatting_types.percentile,
end_value=100, end_color=utils.colors.green)
sheet = self.export_and_get_default_sheet(save=True)
cf_rules = self.get_cf_rules(sheet=sheet)
rules_dict = cf_rules['A1:B4']
self.assertEqual(rules_dict[0].type, 'colorScale')
self.assertEqual(rules_dict[0].colorScale.color[0].rgb, utils.colors.red)
self.assertEqual(rules_dict[0].colorScale.color[1].rgb, utils.colors.green)
self.assertEqual(rules_dict[0].colorScale.cfvo[0].type, utils.conditional_formatting_types.percentile)
self.assertEqual(rules_dict[0].colorScale.cfvo[0].val, 0.0)
self.assertEqual(rules_dict[0].colorScale.cfvo[1].type, utils.conditional_formatting_types.percentile)
self.assertEqual(rules_dict[0].colorScale.cfvo[1].val, 100.0)
def test_add_color_scale_conditional_formatting_start_mid_end(self):
self.sf.add_color_scale_conditional_formatting(start_type=utils.conditional_formatting_types.percentile,
start_value=0, start_color=utils.colors.red,
mid_type=utils.conditional_formatting_types.percentile,
mid_value=50, mid_color=utils.colors.yellow,
end_type=utils.conditional_formatting_types.percentile,
end_value=100, end_color=utils.colors.green)
sheet = self.export_and_get_default_sheet(save=True)
cf_rules = self.get_cf_rules(sheet=sheet)
rules_dict = cf_rules['A1:B4']
self.assertEqual(rules_dict[0].type, 'colorScale')
self.assertEqual(rules_dict[0].colorScale.color[0].rgb, utils.colors.red)
self.assertEqual(rules_dict[0].colorScale.color[1].rgb, utils.colors.yellow)
self.assertEqual(rules_dict[0].colorScale.color[2].rgb, utils.colors.green)
self.assertEqual(rules_dict[0].colorScale.cfvo[0].type, utils.conditional_formatting_types.percentile)
self.assertEqual(rules_dict[0].colorScale.cfvo[0].val, 0.0)
self.assertEqual(rules_dict[0].colorScale.cfvo[1].type, utils.conditional_formatting_types.percentile)
self.assertEqual(rules_dict[0].colorScale.cfvo[1].val, 50.0)
self.assertEqual(rules_dict[0].colorScale.cfvo[2].type, utils.conditional_formatting_types.percentile)
self.assertEqual(rules_dict[0].colorScale.cfvo[2].val, 100.0)
def test_columns_setter(self):
self.sf.columns = ['c', 'd']
self.assertTrue(all(isinstance(col, Container) for col in self.sf.columns))
self.assertEqual([col.value for col in self.sf.columns], ['c', 'd'])
| mit |
larsmans/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
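# Illustrative examples (added for clarity, not part of the original module): the helper
# lower-cases the name, hyphenates spaces and strips '(', ')' and '.' characters, e.g.
#   mldata_filename('Whistler Daily Snowfall')  -> 'whistler-daily-snowfall'
#   mldata_filename('datasets-UCI iris')        -> 'datasets-uci-iris'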
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
    If the file does not exist yet, it is downloaded from mldata.org.
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
    The raw name is automatically converted to an mldata.org URL.
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respect the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
colinsheppard/beam | src/main/python/counts_tools/utils/counts.py | 2 | 30471 | from copy import deepcopy
from collections import defaultdict
import csv
import datetime
import gc
import os
import re
import time
from xml.dom import minidom
import xml.etree.ElementTree as ET
import fiona
import numpy as np
import pandas as pd
import pyproj as pp
import rtree
from shapely.geometry import Polygon, Point, LineString
import shapefile # (Andrew 14/09/16)
__author__ = 'Andrew A Campbell'
######################
# Matching the Links #
######################
def match_links_from_metadata(meta_path, shape_path, EPSG, radius, x_col='Longitude', y_col='Latitude',
station_col='ID', output_file='Matched_censors.csv', link_regex="."):
"""
Wrapper for the old match_links that reads the station locations from a PeMS metadata file that possibly contains
duplicate station_ID, latitude, longitude 3-tuples.
:param meta_path: (str) Path to the PeMS metadata file that contains sensor locations. May have duplicate
station_ID, latitude, longitude 3-tuples.
:param shape_path: (str) Path to shapefile of network
    :param EPSG: (str) Numerical EPSG code of the coordinate reference system used to project the sensor coordinates.
:param radius: (float) Search radius around a sensor for finding candidate sensors to match to it.
:param x_col: (str) Name of column in data_df with x-coordinate values. Default value is 'Longitude'.
:param y_col: (str) Name of column in data_df with y-coordinate values. Default value is 'Latitude'.
:param station_col: (str) Name of column with sensor station id.
    :param output_file: (str) Path to output csv file. Defaults to writing 'Matched_censors.csv' in the working
directory.
:param link_regex (str) Regex pattern to identify links we want to include in matching. Used to exclude public
transit links.
:return: (list) Writes a csv containing the map of sensors to links. Returns a list of sensors with no coordinate
match.
"""
data_df = pd.read_csv(meta_path)[[station_col, x_col, y_col]].drop_duplicates()
return match_links(data_df, shape_path, EPSG, radius, x_col, y_col, station_col, output_file, link_regex=link_regex)
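# Illustrative usage sketch (the file names, EPSG code and regex below are hypothetical,
# not taken from the original repository):
#
#   unmatched = match_links_from_metadata(meta_path='pems_metadata.csv',
#                                         shape_path='network_links.shp',
#                                         EPSG='32611', radius=50.0,
#                                         output_file='matched_sensors.csv',
#                                         link_regex=r'^\d+$')
#
# The call writes the sensor-to-link map to 'matched_sensors.csv' and returns the
# metadata rows whose coordinates were missing and therefore could not be matched.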
def match_links(data_df, shape_path, EPSG, radius, x_col='Longitude', y_col='Latitude',
station_col="station", output_file='Matched_censors.csv', link_regex="."):
"""
Matches each sensor to a link in the network.
Code adapted from: https://github.com/neverforgit/UCB_Mobility_Simulation/blob/master/1.%20Code/Y.%20Clustering/Functions_2.py
:param data_df: (DataFrame) Pandas DataFrame containing the sensor locations.
:param shape_path: (str) Path to shapefile of network
    :param EPSG: (str) Numerical EPSG code of the coordinate reference system used to project the sensor coordinates.
:param radius: (float) Search radius around a sensor for finding candidate sensors to match to it.
:param x_col: (str) Name of column in data_df with x-coordinate values. Default value is 'Longitude'.
:param y_col: (str) Name of column in data_df with y-coordinate values. Default value is 'Latitude'.
:param station_col: (str) Name of column with sensor station id.
    :param output_file: (str) Path to output csv file. Defaults to writing 'Matched_censors.csv' in the working
directory.
:param link_regex (str) Regex pattern to identify links we want to include in matching. Used to exclude public
transit links.
:return: (list) Writes a csv containing the map of sensors to links. Returns a list of sensors with no coordinate
match.
"""
t00 = time.time()
idx = rtree.index.Index()
##
# Loading Sensors
##
convert = pp.Proj(init="epsg:" + EPSG) # 32611 for LA
##
# Preparing Sensors into a list
##
t0 = time.time()
troubled = data_df[pd.isnull(data_df[y_col])]
data_df.dropna(inplace=True)
data_df.reset_index(inplace=True)
    #TODO delete the following line if it runs with it commented out.
#data_df.drop('index', axis=1, inplace=True)
data_df['x'], data_df['y'], data_df['Link'], data_df['ds'] = '', '', 0.0, 0.0
data_df[['x', 'y']] = map(convert, data_df[x_col], data_df[y_col])
t = time.time() - t0
print 'time: ', t, '\n'
print 'Sensors Ready...\n'
##
# Loading the Links
##
# Create a ID dicts. That way we can accept Link_IDs that are non-numeric
link_to_num = defaultdict(int) # maps original link_ids to integers
num_to_link = dict() # maps those integers back to original link_ids
t0 = time.time()
input_lines = fiona.open(shape_path)
    pattern = re.compile(link_regex) # for filtering out public transit links
for i, row in enumerate(input_lines):
print i, ' Now processing ...', row['properties']['ID']
link_id = row['properties']['ID']
# Skip the link if its id does not match the link_regex pattern
if not pattern.match(link_id):
continue
temp = [row['geometry']['coordinates'][0], row['geometry']['coordinates'][1], row['geometry']['coordinates'][2]]
left = min(temp[0][0], temp[1][0], temp[2][0])
bot = min(temp[0][1], temp[1][1], temp[2][1])
right = max(temp[0][0], temp[1][0], temp[2][0])
top = max(temp[0][1], temp[1][1], temp[2][1])
link_to_num[link_id] += 1
num_to_link[link_to_num[row['properties']['ID']]] = link_id
# idx.insert(int(row['properties']['ID']), coordinates=(left, bot, right, top),
# obj=[row['properties']['ID'], row['geometry']])
idx.insert(link_to_num[link_id], coordinates=(left, bot, right, top),
obj=[row['properties']['ID'], row['geometry']])
t = time.time() - t0
print 'time: ', t, '\n'
print 'Lines Ready... \n'
t0 = time.time()
for i in range(len(data_df)):
print 'Now Matching Sensor: ', data_df.loc[i, station_col], '\n'
p = Point(data_df.loc[i, 'x'], data_df.loc[i, 'y'])
temp = list(idx.nearest((p.coords.xy[0][0], p.coords.xy[1][0]), num_results=5,
objects='raw')) # num_results 5 because apparently rtree approaches outside in, if num_result==1 then it will be 142 rows short
shortest = {}
for entry in temp:
line = LineString(entry[1]['coordinates'])
d = ('%.2f' % p.distance(line))
shortest[d] = entry[0]
ds = ('%.2f' % min(float(e) for e in shortest)) # shortest distance of points
link = shortest[str(ds)] #
if float(ds) <= radius:
data_df.loc[i, ['Link', 'ds']] = link, float(ds)
matched = data_df[data_df['ds'] != 0.0]
duplicate_links = list(set(matched[matched.duplicated('Link') == True]['Link'].values))
print 'Cleaning the duplicate links...'
for link in duplicate_links:
temp = data_df[data_df['Link'] == link]
        grouped = temp.groupby('ds') # group the distance ONLY in the duplicated sensors
if len(grouped) == 1: # if there is only one group it means it is the same sensor, different loop detectors
pass
else:
m = ('%.2f' % temp['ds'].min()) # find the minimum distance in the instances
drop_id = temp[temp['ds'] > float(m)][station_col] # put their ID's in drop_id
for i in range(len(drop_id.values)): # iterate through the drop_id's
cleaned_df = data_df.drop(data_df[data_df[station_col] == drop_id.values[
i]].index) # drop the rows who have the ID in the drop_ids and put the cleaned data in cleaned_data_df
d = 0
if duplicate_links != []:
matched_cleaned = cleaned_df[cleaned_df['ds'] != 0.0] # only links with matched sensors
matched_cleaned = matched_cleaned.sort_values('Link')
        d = 1 # a switch for aesthetic purposes while printing the progress
print 'Cleaned data ready...'
print 'Matching done, Preparing for CSV...'
t0 = time.time()
if d == 1:
        matched_cleaned['Link'] = [lk for lk in matched_cleaned['Link']] # keep link ids as-is (they may be non-numeric)
matched_cleaned.to_csv(output_file, columns=[station_col, 'Link', 'ds', y_col, x_col], index=False)
else:
        matched['Link'] = [lk for lk in matched['Link']] # keep link ids as-is (they may be non-numeric)
matched.to_csv(output_file, columns=[station_col, 'Link', 'ds', y_col, x_col], index=False)
t = time.time() - t0
print 'time: ', t, '\n'
print 'CSV Ready!'
print 'Overall Time: ', time.time() - t00
print 'Done'
return troubled
##################################################
# Creating counts files from MTC data
##################################################
def create_MTC_counts(mtc_df, matched_df, flow_type, template_file, counts_name, counts_desc, counts_year, out_file='counts.xml'):
"""
Creates a counts.xml file for MATSim to validate simulated counts against based on the MTC's processed PeMS files.
:param mtc_df: (DataFrame) Pandas DataFrame of the raw csv provided by MTC at
https://mtcdrive.app.box.com/share-data
:param matched_df: (DataFrame) Pandas DataFrame containing the mapping of stations to links in the network. This is
a DataFrame of the ouput of mtc_PeMS_tools.match_links. NOTE: the station id must be used as the index
:param flow_type: (str) Flags whether to create a counts file of mean or median hourly values. Acceptable values are
"mean" or "median".
:param template_file: (str) Path to template of counts.xml output.
:param counts_name: (str) Name of counts data set to be used in root attribute of counts.xml
:param counts_desc: (str) Very short description to be used in the root attribute of counts.xml
:param counts_year: (int) Year of counts data to be used in root attributes of counts.xml
:param out_file: (str) Path and name of output counts file. Defaults to counts.xml
    :return: ([DataFrame, DataFrame]) Returns the output of the call to count_ts. The first DataFrame is the desired
time series output. The second is a DataFrame of stations that were excluded and the reason why.
"""
    # Create filtered time series for each station
ts_df, filter_df = counts_ts(mtc_df, matched_df, flow_type)
# Initialize the ElementTree using the template. Update the attributes for the root, <counts>
tree = ET.parse(template_file) # weekday count file
root = tree.getroot()
root.attrib['name'] = counts_name
root.attrib['desc'] = counts_desc
root.attrib['year'] = str(counts_year)
count0 = root.getchildren()[0]
# Add placeholder <count> blocks for all rows in ts_df
for row in np.arange(ts_df.shape[0] - 1):
root.append(deepcopy(count0))
# Iterate through each sensor in ts_df and update the values of a new <count> block
for i, row in enumerate(ts_df.iterrows()):
ci = root.getchildren()[i] # i'th count block
# Update the attributes for <count>
ci.attrib['loc_id'] = str(int(matched_df.loc[row[0]].loc['Link'])) # Add the link id
ci.attrib['cs_id'] = str(row[0]) # Add the sensor station id
# Iterate to update the volume vals
for j, volume in enumerate(ci.getchildren()):
volume.attrib['val'] = str(row[1][j])
# Write the xml to the output file
tree.write(out_file)
return ts_df, filter_df
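# Illustrative usage sketch (all paths, column names and metadata strings below are
# hypothetical; flow_type must name the flow column present in the MTC csv):
#
#   mtc_df = pd.read_csv('mtc_hourly_flows.csv')
#   matched_df = pd.read_csv('matched_sensors.csv', index_col='station')
#   ts_df, dropped = create_MTC_counts(mtc_df, matched_df, flow_type='avg_flow',
#                                      template_file='counts_template.xml',
#                                      counts_name='PeMS weekday counts',
#                                      counts_desc='Mean hourly flows',
#                                      counts_year=2015, out_file='counts.xml')
#
# Note that matched_df must be indexed by station id, as stated in the docstring above.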
def get_TS_summary_dates(summary_path, strpfrmt='%m/%d/%Y'):
"""
:param summary_path: (str) Path to Time Series summary
:param strpfrmt: (str) Format string for reading dates
:returns (datetime, datetime)
"""
df = pd.read_csv(summary_path)
start = datetime.datetime.strptime(df['First_Day'][0], strpfrmt)
end = datetime.datetime.strptime(df['Last_Day'][0], strpfrmt)
return (start, end)
def counts_ts(mtc_df, matched_df, flow_type, stat_id='station'):
"""
    Takes a DataFrame of the raw csv provided by MTC. Returns an N x 24 DataFrame indexed by station id, where the
    24 columns are the hourly counts.
:param mtc_df: (DataFrame) Pandas DataFrame of the raw csv provided by MTC at
https://mtcdrive.app.box.com/share-data
:param matched_df: (DataFrame) Pandas DataFrame containing the mapping of stations to links in the network. This is
    a DataFrame of the output of mtc_PeMS_tools.match_links
:param flow_type: (str) Flags whether to create a counts file of mean or median hourly values. Acceptable values are
"avg_flow" or "median_flow".
:param stat_id: (str) Name of column with the station ids.
:return: ([DataFrame, DataFrame]) The first DataFrame is the desired time series output. The second is a DataFrame of
stations that were excluded and the reason why.
"""
stations = pd.unique(mtc_df[stat_id]) # array of unique station ids
columns = [str(x) for x in np.arange(0, 24)]
ts_df = pd.DataFrame(index=stations, columns=columns) # empty placeholder DataFrame
filter_dict = {}
for stat in stations:
# Check for missing values and for a link match
match = stat in matched_df[stat_id].values
stat_ts = mtc_df[mtc_df[stat_id] == stat][['hour', flow_type]].sort('hour')
if not match:
filter_dict[stat] = 'No match'
ts_df.drop(stat, inplace=True)
continue
elif stat_ts.shape[0] != 24:
ts_df.drop(stat, inplace=True)
filter_dict[stat] = 'Missing values'
continue
else:
ts_df.loc[stat] = stat_ts[flow_type].values
# Convert filter_dict to a DataFrame and return
filter_df = pd.DataFrame.from_dict(filter_dict, orient='index')
filter_df.columns = ['reason']
return ts_df, filter_df
##################################################
# Filtering Counts Sensor Stations
##################################################
##################################################
# Creating counts files from PeMS_Tools data
##################################################
#TODO verify that we can simply use create_PeMS_Tools_counts_multiday to do single day counts, then delete the single day version below and rename this one to something more general (AAC Spring 16)
#TODO remove the filtering by missing data. Add a qualified Station ID list as input
def create_PeMS_Tools_counts_multiday(station_TS_dir, stat_link_map_file, date_list, counts_name, counts_desc, counts_year, filter_list, aggregation='mean', _list=None, out_file='counts', printerval=100):
"""
:param station_TS_dir: (str) Path to directory with station Time Series. This must be the output of a call to
PeMS_Tools.utilities.station.generate_time_series_V2.
:param stat_link_map_file: (str) CSV file mapping station_ID to MATSim Link_ID.
    :param date_list: ([str]) List of date strings. Each date should follow the '%m/%d/%Y' format (e.g. 12/31/1999)
:param counts_name: (str) Name of counts data set to be used in root attribute of counts.xml
:param counts_desc: (str) Very short description to be used in the root attribute of counts.xml
:param counts_year: (int) Year of counts data to be used in root attributes of counts.xml
:param filter_list: ([str]) If not None, only PeMS station IDs in this list will be used.
:param aggregation: ([str]) List of Pandas methods of aggregation to use(mean, median or std only acceptable values).
:param out_file: (str) Path and name of output counts file. Defaults to counts.xml
    :return: ([DataFrame, DataFrame]) Returns the output of the call to count_ts. The first DataFrame is the desired
time series output. The second is a DataFrame of stations that were excluded and the reason why.
"""
# Initialize the ElementTree
tree = create_template_tree()
root = tree.getroot()
root.attrib['name'] = counts_name
root.attrib['desc'] = counts_desc
root.attrib['year'] = str(counts_year)
count0 = deepcopy(root.getchildren()[0])
root.remove(list(root)[0]) # remove the one dummy count element
# Get the station_ID to link_ID lookup
id_map = pd.read_csv(stat_link_map_file, index_col='ID', dtype='string')
id_map.index = [str(i) for i in id_map.index] # convert index back to string for easy lookups below
# Move through all the time series directories and add the sensor if data available.
orig_dir = os.getcwd()
os.chdir(station_TS_dir)
stations = [n for n in os.listdir('.') if n.isdigit()] # list of all station folder names
used_links = []
if filter_list:
stations = [stat for stat in stations if stat in filter_list]
for i, stat in enumerate(stations):
if i % printerval == 0:
print 'Processing station: %s (%s of %s)' % (stat, i, len(stations))
if id_map.loc[stat]['Link'] in used_links:
print '%s has a duplicate link id, skipping' % stat
continue
start, end = get_TS_summary_dates('./%s/summary.csv' % stat)
if stat not in id_map.index: # Check if the station was successfully mapped to a link
continue
if not((start < datetime.datetime.strptime(date_list[0], '%m/%d/%Y')) & (datetime.datetime.strptime(date_list[-1], '%m/%d/%Y')< end)): # Check if this station was active during target periods
continue # Skip this station
df = pd.read_csv('./%s/time_series.csv' % stat, index_col='Timestamp')
df['date'] = [d[0:10] for d in df.index]
df['hour'] = [d[-8:-6] for d in df.index]
        # Make a small df of only the days matching the target dates. It is much faster to resample the smaller one
vol_5min = df[df['date'].isin(date_list)][['Total_Flow', 'hour']] # all 5-min observations on desired dates
vol_5min.index = pd.to_datetime(vol_5min.index)
vol_hourly = vol_5min.resample('1h', how='sum') # Rollup the 5-minute counts to 1-hr
vol_hourly['hour'] = [d.hour for d in vol_hourly.index]
# Now we need to groupby the hour and take the mean
hr_means = vol_hourly.groupby('hour').mean() # Mean hourly flows for all days in date_list!!!
#TODO perhaps we should run this filter earlier. Imagine we have one hour with one observation on one day and the sensor is off for all others.
if hr_means['Total_Flow'].isnull().any(): # skip if any hours are missing
continue
# Add the counts to the ElementTree
link_id = id_map.loc[stat]['Link']
used_links.append(link_id)
augment_counts_tree(tree, hr_means['Total_Flow'].values, stat, link_id, count0)
        if i % 100 == 0:  # garbage collect every 100 iterations
gc.collect()
# tree.write(out_file, encoding='UTF-8')
pretty_xml = prettify(tree.getroot())
with open(out_file, 'w') as fo:
fo.write(pretty_xml)
os.chdir(orig_dir)
def create_PeMS_Tools_counts_measures(station_TS_dir, stat_link_map_file, date_list, counts_name, counts_desc, counts_year, filter_list, aggregation_list, _list=None, out_prefix='counts'):
"""
:param station_TS_dir: (str) Path to directory with station Time Series. This must be the output of a call to
PeMS_Tools.utilities.station.generate_time_series_V2.
:param stat_link_map_file: (str) CSV file mapping station_ID to MATSim Link_ID.
    :param date_list: ([str]) List of date strings. Each date should follow the '%m/%d/%Y' format (e.g. 12/31/1999)
:param counts_name: (str) Name of counts data set to be used in root attribute of counts.xml
:param counts_desc: (str) Very short description to be used in the root attribute of counts.xml
:param counts_year: (int) Year of counts data to be used in root attributes of counts.xml
:param filter_list: ([str]) If not None, only PeMS station IDs in this list will be used.
:param aggregation_list: ([str]) List of Pandas methods of aggregation to use(mean, median or std only acceptable values).
:param out_prefix: (str) Path and name of output counts file. Defaults to counts.xml
    :return: ([DataFrame, DataFrame]) Returns the output of the call to count_ts. The first DataFrame is the desired
time series output. The second is a DataFrame of stations that were excluded and the reason why.
"""
# Create a list of trees, one for each aggregation measure
tree_list = []
for agg in aggregation_list:
# Initialize the ElementTree
tree = create_template_tree()
root = tree.getroot()
root.attrib['name'] = agg + " - " + counts_name
root.attrib['desc'] = agg + " - " + counts_desc
root.attrib['year'] = str(counts_year)
count0 = deepcopy(root.getchildren()[0])
root.remove(list(root)[0]) # remove the one dummy count element
tree_list.append(tree)
# Get the station_ID to link_ID lookup
id_map = pd.read_csv(stat_link_map_file, index_col='ID', dtype='string')
id_map.index = [str(i) for i in id_map.index] # convert index back to string for easy lookups below
# Move through all the time series directories and add the sensor if data available.
orig_dir = os.getcwd()
os.chdir(station_TS_dir)
stations = [n for n in os.listdir('.') if n.isdigit()] # list of all station folder names
if filter_list:
stations = [stat for stat in stations if stat in filter_list]
for i, stat in enumerate(stations):
print 'Processing station: %s' % stat
start, end = get_TS_summary_dates('./%s/summary.csv' % stat)
if stat not in id_map.index: # Check if the station was successfully mapped to a link
continue
if not((start < datetime.datetime.strptime(date_list[0], '%m/%d/%Y')) & (datetime.datetime.strptime(date_list[-1], '%m/%d/%Y')< end)): # Check if this station was active during target periods
continue # Skip this station
df = pd.read_csv('./%s/time_series.csv' % stat, index_col='Timestamp')
df['date'] = [d[0:10] for d in df.index]
df['hour'] = [d[-8:-6] for d in df.index]
        # Make a small df of only the days matching the target dates. It is much faster to resample the smaller one
vol_5min = df[df['date'].isin(date_list)][['Total_Flow', 'hour']] # all 5-min observations on desired dates
vol_5min.index = pd.to_datetime(vol_5min.index)
vol_hourly = vol_5min.resample('1h', how='sum') # Rollup the 5-minute counts to 1-hr
vol_hourly['hour'] = [d.hour for d in vol_hourly.index]
# Now we need to groupby the hour and take the measures
        for j, agg in enumerate(aggregation_list):  # separate index so the station index i above is not shadowed
# 1 - generate the aggregation
if agg == 'mean':
hr_agg = vol_hourly.groupby('hour').mean() # Mean hourly flows for all days in date_list!!!
elif agg == 'median':
hr_agg = vol_hourly.groupby('hour').median() # Median hourly flows for all days in date_list!!!
elif agg == 'std':
hr_agg = vol_hourly.groupby('hour').std() # Standard dev of hourly flows for all days in date_list!!!
#TODO perhaps we should run this filter earlier. Imagine we have one hour with one observation on one day and the sensor is off for all others.
if hr_agg['Total_Flow'].isnull().any(): # skip if any hours are missing
continue
# Add the counts to the ElementTree
            tree = tree_list[j]
link_id = id_map.loc[stat]['Link']
augment_counts_tree(tree, hr_agg['Total_Flow'].values, stat, link_id, count0)
if i%100: # garbage collect every 100 iterations
gc.collect()
for i, tree in enumerate(tree_list):
# tree.write(out_file, encoding='UTF-8')
pretty_xml = prettify(tree.getroot())
with open(out_prefix + "_" + aggregation_list[i] + ".txt", 'w') as fo:
fo.write(pretty_xml)
os.chdir(orig_dir)
##################################################
# XML ET tools
##################################################
#TODO all this xml stuff should go into its own class: class CountTree(ElementTree)
def augment_counts_tree(counts_tree, data, station_ID, link_ID, empty_count):
"""
Add data for a new detector station (sensor) to an existing XML tree.
:param counts_tree: (xml.etree.ElementTree) Tree of MATSim counts.
:param data: (list-like) Contains 24 elements. Each is the hourly mean or median flow.
:param station_ID: (str) ID of the physical detector station.
:param link_ID: (str) ID of the MATSim network link that the station maps to.
:param empty_count: (xml.etree.ElementTree.Element)
"""
new_count = deepcopy(empty_count)
# Add the IDs and volumes to the new 'count' element
new_count.attrib['loc_id'] = link_ID
new_count.attrib['cs_id'] = station_ID
for i, vol in enumerate(list(new_count)):
vol.attrib['val'] = str(int(data[i]))
counts_tree.getroot().append(new_count) # add to the tree
return counts_tree
def create_template_tree():
"""
Creates an empty counts ElementTree.
"""
root = ET.Element('counts')
# root.set('version', '1.0')
# root.set('encoding', 'UTF-8')
root_atts = vd = {'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance",
'xsi:noNamespaceSchemaLocation': "http://matsim.org/files/dtd/counts_v1.xsd",
'name': "name of the dataset",
'desc': "describe the dataset",
'year': "yyyy",
'layer': "0"}
root.attrib = root_atts
ET.SubElement(root, 'count')
ct = list(root)[0]
for hr in np.arange(1,25):
vol = ET.Element('volume', {'h': str(hr), 'val': ''})
ct.append(vol)
return ET.ElementTree(root)
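# For reference (added note): the empty tree produced above serializes to roughly the
# following skeleton, with one dummy <count> element holding 24 empty <volume> entries:
#
#   <counts xmlns:xsi="..." xsi:noNamespaceSchemaLocation="..."
#           name="name of the dataset" desc="describe the dataset" year="yyyy" layer="0">
#     <count>
#       <volume h="1" val="" />
#       ...
#       <volume h="24" val="" />
#     </count>
#   </counts>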
def df_from_counts(counts_path):
"""
:param counts_path: (str) Path to a MATSim counts.xml file
:returns (DataFrame) Rows are hours (0:24) and cols are sensor stations.
"""
tree = ET.parse(counts_path)
counts = tree.getroot()
n_stats = len(counts.getchildren()) # number of unique sensor stations
stat_dfs = {}
for count in counts.getchildren():
stat_dfs[count.attrib['cs_id']] = [int(v.attrib['val']) for v in count.getchildren()] # each count element is a unique station
return pd.DataFrame(stat_dfs)
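# Illustrative usage sketch (the file name and station id are hypothetical):
#
#   hourly = df_from_counts('counts.xml')
#   hourly.shape        # -> (24, number_of_stations); rows are hours, columns are stations
#   hourly['401234']    # the 24 hourly volumes recorded for sensor station '401234'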
##################################################
# Validation tools
##################################################
def validate_screenline(link_ids, aggr_hours, counts_df, counts_col, facility_name):
"""
:param link_ids: ([int]) List of the IDs of the MATSim network links.
:param aggr_hours: ([int]) Defines the hours to be included in aggregation. Uses 24-hr time. First hour is 1, last
is 24.
:param counts_df: (pd.DataFrame) DataFrame of the MATSim counts validation output. DF has not been processed
significantly after running pd.read_csv.
:param counts_col: (str) Name of the column of counts to aggregate. This is useful if you have added a rescaled
column.
:param facility_name: (str) Name of screenline facility
    :return: ([str, float, float, float, float]) Facility_Name, Observed, Predicted, Predicted_Less_Obs, Prcnt_Difference
"""
rows = counts_df[np.array([id in link_ids for id in counts_df['Link Id']]) &
np.array([count in aggr_hours for count in counts_df['Hour']])]
observed = np.sum(rows['Count volumes'])
predicted = np.sum(rows[counts_col])
diff = predicted - observed
try:
prct_diff = np.true_divide(diff, observed)
except ZeroDivisionError:
prct_diff = float("inf")
return [facility_name, observed, predicted, diff, prct_diff]
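# Illustrative usage sketch (link ids, hours, the file name and the tab delimiter are
# assumptions for the example; the column names follow the rest of this module):
#
#   compare_df = pd.read_csv('run0.10.countscompare.txt', sep='\t')
#   row = validate_screenline(link_ids=[101, 102, 103],
#                             aggr_hours=range(7, 10),   # hours 7-9
#                             counts_df=compare_df,
#                             counts_col='MATSIM volumes',
#                             facility_name='Bay Bridge screenline')
#   # row == [facility_name, observed, predicted, predicted - observed, pct_difference]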
def optimize_counts(df):
"""
Used for optimizing the CountsScaleFactor parameter that MATSim uses for scaling counts.
:param df: (pd.DataFrame) A DF read directly from the run0.*.countscompare.txt file that MATSim
produces.
:return: (float, float, float) The optimized value to scale the CountsScaleFactor by, original RMSE, and new RMSE
after applying the optimized CountsScaleFactor
"""
# Calculate original RMSE
o_numer = np.dot(df['MATSIM volumes'].subtract(df['Count volumes']), df['MATSIM volumes'].subtract(df['Count volumes']))
o_denom = df.shape[0]
o_RMSE = np.sqrt(np.true_divide(o_numer, o_denom))
# Optimal value to rescale CountsScaleFactor
alpha = np.true_divide(df['MATSIM volumes'].dot(df['Count volumes']), df['MATSIM volumes'].dot(df['MATSIM volumes']))
# Rescaled RMSE
r_numer = np.dot(alpha * df['MATSIM volumes'] - df['Count volumes'],
alpha * df['MATSIM volumes'] - df['Count volumes'])
r_RMSE = np.sqrt(np.true_divide(r_numer, o_denom))
return alpha, o_RMSE, r_RMSE
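# Why the closed form above is optimal (added explanation): rescaling the MATSim volumes m
# by a factor alpha to best match the observed counts c means minimizing the squared error
#   f(alpha) = sum_i (alpha * m_i - c_i)^2.
# Setting the derivative f'(alpha) = 2 * sum_i m_i * (alpha * m_i - c_i) to zero gives
#   alpha = (m . c) / (m . m),
# which is exactly the np.dot ratio computed above; o_RMSE and r_RMSE report the
# root-mean-square error before and after applying that rescaling.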
##
# Helpers
##
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
    Courtesy of: https://pymotw.com/2/xml/etree/ElementTree/create.html
"""
rough_string = ET.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
def date_string_list(start_date, end_date, weekdays, dt_format='%m/%d/%Y'):
"""
:param start_date: (datetime.datetime) First date in range
:param end_date: (datetime.datetime) Last date in range
:param weekdays: ([int]) Days of week to use (Monday = 0)
:param dt_format: (str) Date format string. Conforms to the strftime grammar.
:returns ([str]) List of date strings. Default is '%m/%d/%Y'
"""
all_dates = pd.date_range(start_date, end_date)
date_list = []
for d in all_dates:
if d.weekday() in weekdays:
date_list.append(datetime.datetime.strftime(d, dt_format))
return date_list
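# Illustrative example (dates chosen only for illustration): Tuesdays through Thursdays
# between 2016-03-01 and 2016-03-07 inclusive:
#
#   date_string_list(datetime.datetime(2016, 3, 1), datetime.datetime(2016, 3, 7),
#                    weekdays=[1, 2, 3])
#   # -> ['03/01/2016', '03/02/2016', '03/03/2016']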
def get_datetime_range(start_date, end_date, time_delta):
"""
:param start_date: (datetime.datetime) First date (included)
:param end_date: (datetime.datetime) Last date (not included)
:param time_delta: (datetime.timedelta) Increment
:returns ([datetime.datetime]) All datetimes from start_date up to, but not including, end_date
"""
dt = start_date
out = []
while dt < end_date:
out.append(dt)
dt += time_delta
return out
def get_24_index():
"""
    Returns a list of 24 datetime.datetimes at 1-hour intervals. The year and day components are dummy values,
    present only because datetime objects require them.
"""
start = datetime.datetime(1999, 12, 31, 0)
end = datetime.datetime(2000, 01, 01, 0)
td = datetime.timedelta(0, 3600)
return get_datetime_range(start + td, end + td, td)
| gpl-3.0 |
rsivapr/scikit-learn | sklearn/manifold/tests/test_isomap.py | 31 | 3991 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
jrings/chicagodatacomp | chidat/pull_data.py | 1 | 1988 | import pandas as pd
import requests
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("chidat")
class PullDataError(BaseException):
pass
class PullData(object):
"""Queries Chicago public data
"""
def __init__(self):
self.base_url = "http://data.cityofchicago.org/views"
def pull(self, url, max_rows=None, return_type='frame'):
"""Pulls a csv of rows from the Chicago data portal based on the url, which can be found by visiting the page of the dataset on the web portal, selecting Export->SODA API
**Parameters**
* url <str> - URL to pull from
* max_rows <int|None>: Maximum number of rows to request. If None, will try to pull as many as possible
* return_type <str|"frame">: Return type is either a pandas DataFrame ("frame") or a list of dictionaries ("list")
"""
if return_type not in ["frame", "list"]:
raise PullDataError("return_type must be either 'frame' or 'list'")
data = []
offset = 0
limit = 1000 #That's what the API allows
while True:
log.debug("Pulling {} records from offset {}".format(limit, offset))
req_url = url + "?$limit={}&$offset={}&$order=:id".format(limit, offset)
r = requests.get(req_url)
if r.status_code != 200:
log.info("Request resulted in status code {}, finished pulling".format({r.status_code}))
break
data.extend(r.json())
offset += limit
if max_rows and offset >= max_rows:
log.debug("Maximum number of rows ({}) pulled".format(max_rows))
break
if not data:
raise PullDataError("Pull failed, got no data")
log.info("Got {} rows".format(len(data)))
if return_type == "list":
return data
else:
return pd.DataFrame(data)
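# Illustrative usage sketch (the dataset URL below is hypothetical; real endpoints can be
# found through the portal's Export -> SODA API page, as noted in pull()):
#
#   puller = PullData()
#   df = puller.pull('http://data.cityofchicago.org/views/abcd-1234/rows.json',
#                    max_rows=5000, return_type='frame')
#
# pull() pages through the endpoint 1000 records at a time (the API limit noted above) and
# stops after max_rows records or on the first non-200 response.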
| unlicense |
radicalbit/incubator-zeppelin | spark/src/main/resources/python/zeppelin_pyspark.py | 16 | 12106 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, getopt, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
import ast
import warnings
# for back compatibility
from pyspark.sql import SQLContext, HiveContext, Row
class Logger(object):
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PyZeppelinContext(dict):
def __init__(self, zc):
self.z = zc
self._displayhook = lambda *args: None
def show(self, obj):
from pyspark.sql import DataFrame
if isinstance(obj, DataFrame):
print(self.z.showData(obj._jdf))
else:
print(str(obj))
# By implementing special methods it makes operating on it more Pythonic
def __setitem__(self, key, item):
self.z.put(key, item)
def __getitem__(self, key):
return self.z.get(key)
def __delitem__(self, key):
self.z.remove(key)
def __contains__(self, item):
return self.z.containsKey(item)
def add(self, key, value):
self.__setitem__(key, value)
def put(self, key, value):
self.__setitem__(key, value)
def get(self, key):
return self.__getitem__(key)
def getInterpreterContext(self):
return self.z.getInterpreterContext()
def input(self, name, defaultValue=""):
return self.z.input(name, defaultValue)
def select(self, name, options, defaultValue=""):
# auto_convert to ArrayList doesn't match the method signature on JVM side
tuples = list(map(lambda items: self.__tupleToScalaTuple2(items), options))
iterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(tuples)
return self.z.select(name, defaultValue, iterables)
def checkbox(self, name, options, defaultChecked=None):
if defaultChecked is None:
defaultChecked = []
optionTuples = list(map(lambda items: self.__tupleToScalaTuple2(items), options))
optionIterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(optionTuples)
defaultCheckedIterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(defaultChecked)
checkedItems = gateway.jvm.scala.collection.JavaConversions.seqAsJavaList(self.z.checkbox(name, defaultCheckedIterables, optionIterables))
result = []
for checkedItem in checkedItems:
result.append(checkedItem)
return result;
def registerHook(self, event, cmd, replName=None):
if replName is None:
self.z.registerHook(event, cmd)
else:
self.z.registerHook(event, cmd, replName)
def unregisterHook(self, event, replName=None):
if replName is None:
self.z.unregisterHook(event)
else:
self.z.unregisterHook(event, replName)
def getHook(self, event, replName=None):
if replName is None:
return self.z.getHook(event)
return self.z.getHook(event, replName)
def _setup_matplotlib(self):
# If we don't have matplotlib installed don't bother continuing
try:
import matplotlib
except ImportError:
return
# Make sure custom backends are available in the PYTHONPATH
rootdir = os.environ.get('ZEPPELIN_HOME', os.getcwd())
mpl_path = os.path.join(rootdir, 'interpreter', 'lib', 'python')
if mpl_path not in sys.path:
sys.path.append(mpl_path)
# Finally check if backend exists, and if so configure as appropriate
try:
matplotlib.use('module://backend_zinline')
import backend_zinline
# Everything looks good so make config assuming that we are using
# an inline backend
self._displayhook = backend_zinline.displayhook
self.configure_mpl(width=600, height=400, dpi=72, fontsize=10,
interactive=True, format='png', context=self.z)
except ImportError:
# Fall back to Agg if no custom backend installed
matplotlib.use('Agg')
warnings.warn("Unable to load inline matplotlib backend, "
"falling back to Agg")
def configure_mpl(self, **kwargs):
import mpl_config
mpl_config.configure(**kwargs)
def __tupleToScalaTuple2(self, tuple):
if (len(tuple) == 2):
return gateway.jvm.scala.Tuple2(tuple[0], tuple[1])
else:
raise IndexError("options must be a list of tuple of 2")
class SparkVersion(object):
SPARK_1_4_0 = 10400
SPARK_1_3_0 = 10300
SPARK_2_0_0 = 20000
def __init__(self, versionNumber):
self.version = versionNumber
def isAutoConvertEnabled(self):
return self.version >= self.SPARK_1_4_0
def isImportAllPackageUnderSparkSql(self):
return self.version >= self.SPARK_1_3_0
def isSpark2(self):
return self.version >= self.SPARK_2_0_0
class PySparkCompletion:
def __init__(self, interpreterObject):
self.interpreterObject = interpreterObject
def getGlobalCompletion(self):
objectDefList = []
try:
for completionItem in list(globals().keys()):
objectDefList.append(completionItem)
except:
return None
else:
return objectDefList
def getMethodCompletion(self, text_value):
execResult = locals()
if text_value == None:
return None
completion_target = text_value
try:
if len(completion_target) <= 0:
return None
if text_value[-1] == ".":
completion_target = text_value[:-1]
exec("{} = dir({})".format("objectDefList", completion_target), globals(), execResult)
except:
return None
else:
return list(execResult['objectDefList'])
def getCompletion(self, text_value):
completionList = set()
globalCompletionList = self.getGlobalCompletion()
if globalCompletionList != None:
for completionItem in list(globalCompletionList):
completionList.add(completionItem)
if text_value != None:
objectCompletionList = self.getMethodCompletion(text_value)
if objectCompletionList != None:
for completionItem in list(objectCompletionList):
completionList.add(completionItem)
if len(completionList) <= 0:
self.interpreterObject.setStatementsFinished("", False)
else:
result = json.dumps(list(filter(lambda x : not re.match("^__.*", x), list(completionList))))
self.interpreterObject.setStatementsFinished(result, False)
client = GatewayClient(port=int(sys.argv[1]))
sparkVersion = SparkVersion(int(sys.argv[2]))
if sparkVersion.isSpark2():
from pyspark.sql import SparkSession
else:
from pyspark.sql import SchemaRDD
if sparkVersion.isAutoConvertEnabled():
gateway = JavaGateway(client, auto_convert = True)
else:
gateway = JavaGateway(client)
java_import(gateway.jvm, "org.apache.spark.SparkEnv")
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
intp = gateway.entry_point
output = Logger()
sys.stdout = output
sys.stderr = output
intp.onPythonScriptInitialized(os.getpid())
jsc = intp.getJavaSparkContext()
if sparkVersion.isImportAllPackageUnderSparkSql():
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
else:
java_import(gateway.jvm, "org.apache.spark.sql.SQLContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.HiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.LocalHiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.TestHiveContext")
java_import(gateway.jvm, "scala.Tuple2")
_zcUserQueryNameSpace = {}
jconf = intp.getSparkConf()
conf = SparkConf(_jvm = gateway.jvm, _jconf = jconf)
sc = _zsc_ = SparkContext(jsc=jsc, gateway=gateway, conf=conf)
_zcUserQueryNameSpace["_zsc_"] = _zsc_
_zcUserQueryNameSpace["sc"] = sc
if sparkVersion.isSpark2():
spark = __zSpark__ = SparkSession(sc, intp.getSparkSession())
sqlc = __zSqlc__ = __zSpark__._wrapped
_zcUserQueryNameSpace["sqlc"] = sqlc
_zcUserQueryNameSpace["__zSqlc__"] = __zSqlc__
_zcUserQueryNameSpace["spark"] = spark
_zcUserQueryNameSpace["__zSpark__"] = __zSpark__
else:
sqlc = __zSqlc__ = SQLContext(sparkContext=sc, sqlContext=intp.getSQLContext())
_zcUserQueryNameSpace["sqlc"] = sqlc
_zcUserQueryNameSpace["__zSqlc__"] = sqlc
sqlContext = __zSqlc__
_zcUserQueryNameSpace["sqlContext"] = sqlContext
completion = __zeppelin_completion__ = PySparkCompletion(intp)
_zcUserQueryNameSpace["completion"] = completion
_zcUserQueryNameSpace["__zeppelin_completion__"] = __zeppelin_completion__
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext())
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["z"] = z
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
while True :
req = intp.getStatements()
try:
stmts = req.statements().split("\n")
jobGroup = req.jobGroup()
jobDesc = req.jobDescription()
# Get post-execute hooks
try:
global_hook = intp.getHook('post_exec_dev')
except:
global_hook = None
try:
user_hook = __zeppelin__.getHook('post_exec')
except:
user_hook = None
nhooks = 0
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
if stmts:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
sc.setJobGroup(jobGroup, jobDesc)
code = compile('\n'.join(stmts), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
[code.body[-(nhooks + 1)]])
try:
for node in to_run_exec:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
for node in to_run_single:
mod = ast.Interactive([node])
code = compile(mod, '<stdin>', 'single')
exec(code, _zcUserQueryNameSpace)
for node in to_run_hooks:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
intp.setStatementsFinished("", False)
except Py4JJavaError:
# raise it to outside try except
raise
except:
exception = traceback.format_exc()
m = re.search("File \"<stdin>\", line (\d+).*", exception)
if m:
line_no = int(m.group(1))
intp.setStatementsFinished(
"Fail to execute line {}: {}\n".format(line_no, stmts[line_no - 1]) + exception, True)
else:
intp.setStatementsFinished(exception, True)
else:
intp.setStatementsFinished("", False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
| apache-2.0 |
kazemakase/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
EarToEarOak/RTLSDR-Scanner | rtlsdr_scanner/plot_time.py | 1 | 4608 | #
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import threading
import time
from matplotlib.ticker import ScalarFormatter, AutoMinorLocator
from rtlsdr_scanner.events import post_event, EventThread, Event
from rtlsdr_scanner.utils_mpl import utc_to_mpl, set_date_ticks
class PlotterTime(object):
def __init__(self, notify, figure, settings):
self.notify = notify
self.figure = figure
self.settings = settings
self.plot = None
self.axes = None
self.threadPlot = None
self.__setup_plot()
self.set_grid(self.settings.grid)
def __setup_plot(self):
self.axes = self.figure.add_subplot(111)
self.axes.set_xlabel("Time")
self.axes.set_ylabel('Points')
numFormatter = ScalarFormatter(useOffset=False)
set_date_ticks(self.axes.xaxis, False)
self.axes.yaxis.set_major_formatter(numFormatter)
self.axes.yaxis.set_minor_locator(AutoMinorLocator(10))
now = time.time()
self.axes.set_xlim(utc_to_mpl(now), utc_to_mpl(now - 10))
def draw_measure(self, _measure, _show):
pass
def hide_measure(self):
pass
def scale_plot(self, force=False):
if self.figure is not None and self.plot is not None:
if self.settings.autoT or force:
times = self.plot[0].get_data()[0]
tMin = min(times)
tMax = max(times)
if tMin == tMax:
tMax += utc_to_mpl(10)
self.axes.set_xlim(tMin, tMax)
if self.settings.autoL or force:
self.axes.autoscale(True, 'y', True)
def get_axes(self):
return self.axes
def get_axes_bar(self):
return None
def get_bar(self):
return self.barBase
def get_plot_thread(self):
return self.threadPlot
def set_title(self, title):
self.axes.set_title(title, fontsize='medium')
def set_plot(self, spectrum, extent, _annotate=False):
self.threadPlot = ThreadPlot(self, self.settings, self.axes,
spectrum, extent)
self.threadPlot.start()
return self.threadPlot
def redraw_plot(self):
if self.figure is not None:
post_event(self.notify, EventThread(Event.DRAW))
def clear_plots(self):
set_date_ticks(self.axes.xaxis, False)
children = self.axes.get_children()
for child in children:
if child.get_gid() is not None:
if child.get_gid() == 'plot':
child.remove()
def set_grid(self, on):
self.axes.grid(on)
self.redraw_plot()
def set_axes(self, on):
if on:
self.axes.set_axis_on()
else:
self.axes.set_axis_off()
def close(self):
self.figure.clear()
self.figure = None
class ThreadPlot(threading.Thread):
def __init__(self, parent, settings, axes, data, extent):
threading.Thread.__init__(self)
self.name = "Plot"
self.parent = parent
self.settings = settings
self.axes = axes
self.data = data
self.extent = extent
def run(self):
if self.data is None:
self.parent.threadPlot = None
return
total = len(self.data)
if total > 0:
self.parent.clear_plots()
xs = [utc_to_mpl(x) for x in self.data.keys()]
ys = [len(sweep) for sweep in self.data.values()]
self.parent.plot = self.axes.plot(xs, ys, 'bo', gid='plot')
set_date_ticks(self.axes.xaxis)
self.parent.scale_plot()
self.parent.redraw_plot()
self.parent.threadPlot = None
if __name__ == '__main__':
print 'Please run rtlsdr_scan.py'
exit(1)
| gpl-3.0 |
henridwyer/scikit-learn | sklearn/metrics/__init__.py | 52 | 3394 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
SpectreJan/gnuradio | gr-filter/examples/decimate.py | 58 | 6061 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 10000000 # number of samples to use
self._fs = 10000 # initial sampling rate
self._decim = 20 # Decimation rate
# Generate the prototype filter taps for the decimators with a 200 Hz bandwidth
self._taps = filter.firdes.low_pass_2(1, self._fs,
200, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._decim))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._decim
print "Taps per channel: ", tpc
# Build the input signal source
# We create a list of freqs, and a sine wave is generated and added to the source
# for each one of these frequencies.
self.signals = list()
self.add = blocks.add_cc()
freqs = [10, 20, 2040]
for i in xrange(len(freqs)):
self.signals.append(analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freqs[i], 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct a PFB decimator filter
self.pfb = filter.pfb.decimator_ccf(self._decim, self._taps, 0)
# Construct a standard FIR decimating filter
self.dec = filter.fir_filter_ccf(self._decim, self._taps)
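# Note: the plain FIR decimating filter (self.dec) is constructed for reference only;
# it is never connected below, so only the PFB path is exercised in this flow graph.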
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Create the sink for the decimated signal
self.snk = blocks.vector_sink_c()
self.connect(self.pfb, self.snk)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(16,9))
fig2 = pylab.figure(2, figsize=(16,9))
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input to the decimator
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b")
p1_t = sp1_t.plot(t_in, x_in.imag, "r")
sp1_t.set_ylim([-tb._decim*1.1, tb._decim*1.1])
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot the output of the decimator
fs_o = tb._fs / tb._decim
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk.data()[Ns:Ns+Ne]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("PFB Decimated Signal", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o.real, "b-o")
p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
gclenaghan/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
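# calibration_curve bins prob_pos into n_bins bins and returns, for each bin, the
# observed fraction of positives and the mean predicted probability; a perfectly
# calibrated model would fall on the diagonal reference line plotted above.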
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
kaichogami/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
greatlse/diffusion_analysis_MD_simulations | diffusion_analysis.py | 1 | 18558 | # coding: utf-8
'''
Author: Tyler Reddy
The purpose of this Python module is to provide utility functions for analyzing the diffusion of particles in molecular dynamics simulation trajectories using either linear or anomalous diffusion models.'''
import numpy
import scipy
import scipy.optimize
def fit_anomalous_diffusion_data(time_data_array,MSD_data_array,degrees_of_freedom=2):
'''This function should fit anomalous diffusion data to Equation 1 in [Kneller2011]_, and return appropriate diffusion parameters.
.. math::
MSD = ND_{\\alpha}t^{\\alpha}
An appropriate coefficient (`N` = 2,4,6 for 1,2,3 `degrees_of_freedom`) will be assigned based on the specified `degrees_of_freedom`. The latter value defaults to 2 (i.e., a planar phospholipid bilayer with `N` = 4).
Input data should include arrays of MSD (in Angstroms ** 2) and time values (in ns).
The results are returned in a tuple.
Parameters
----------
time_data_array : array_like
Input array of time window sizes (nanosecond units)
MSD_data_array : array_like
Input array of MSD values (Angstrom ** 2 units; order matched to time_data_array)
degrees_of_freedom : int
The degrees of freedom for the diffusional process (1, 2 or 3; default 2)
Returns
-------
fractional_diffusion_coefficient
The fractional diffusion coefficient (units of Angstrom ** 2 / ns ** alpha)
standard_deviation_fractional_diffusion_coefficient
The standard deviation of the fractional diffusion coefficent (units of Angstrom ** 2 / ns ** alpha)
alpha
The scaling exponent (no dimensions) of the non-linear fit
standard_deviation_alpha
The standard deviation of the scaling exponent (no dimensions)
sample_fitting_data_X_values_nanoseconds
An array of time window sizes (x values) that may be used to plot the non-linear fit curve
sample_fitting_data_Y_values_Angstroms
An array of MSD values (y values) that may be used to plot the non-linear fit curve
Raises
------
ValueError
If the time window and MSD arrays do not have the same shape
References
----------
.. [Kneller2011] Kneller et al. (2011) J Chem Phys 135: 141105.
Examples
--------
Calculate fractional diffusion coefficient and alpha from artificial data (would typically obtain empirical data from an MD simulation trajectory):
>>> import diffusion_analysis
>>> import numpy
>>> artificial_time_values = numpy.arange(10)
>>> artificial_MSD_values = numpy.array([0.,1.,2.,2.2,3.6,4.7,5.8,6.6,7.0,6.9])
>>> results_tuple = diffusion_analysis.fit_anomalous_diffusion_data(artificial_time_values,artificial_MSD_values)
>>> D, D_std, alpha, alpha_std = results_tuple[0:4]
>>> print D, D_std, alpha, alpha_std
0.268426206526 0.0429995249239 0.891231967011 0.0832911559401
Plot the non-linear fit data:
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> sample_fit_x_values, sample_fit_y_values = results_tuple[4:]
>>> p = plt.plot(sample_fit_x_values,sample_fit_y_values,'-',artificial_time_values,artificial_MSD_values,'.')
.. image:: example_nonlinear.png
'''
if time_data_array.shape != MSD_data_array.shape:
raise ValueError("The shape of time_data_array must match the shape of MSD_data_array.")
def function_to_fit(time,fractional_diffusion_coefficient,scaling_exponent):
coefficient_dictionary = {1:2,2:4,3:6} #dictionary for mapping degrees_of_freedom to coefficient in fitting equation
coefficient = coefficient_dictionary[degrees_of_freedom]
return coefficient * fractional_diffusion_coefficient * (time ** scaling_exponent) #equation 1 in the above paper with appropriate coefficient based on degrees of freedom
#fit the above function to the data and pull out the resulting parameters
optimized_parameter_value_array, estimated_covariance_params_array = scipy.optimize.curve_fit(function_to_fit,time_data_array,MSD_data_array)
#generate sample fitting data over the range of time window values (user can plot if they wish)
sample_fitting_data_X_values_nanoseconds = numpy.linspace(time_data_array[0],time_data_array[-1],100)
sample_fitting_data_Y_values_Angstroms = function_to_fit(sample_fitting_data_X_values_nanoseconds, *optimized_parameter_value_array)
#could then plot the non-linear fit curve in matplotlib with, for example: axis.plot(sample_fitting_data_X_values_nanoseconds,sample_fitting_data_Y_values_Angstroms,color='black')
#could plot the original data points alongside the fit (MSD vs time) with, for example: axis.scatter(time_data_array,MSD_data_array,color='black')
#extract pertinent values from the scipy curve_fit arrays (D_alpha, alpha, and their standard deviations)
parameter_standard_deviation_array = numpy.sqrt(numpy.diagonal(estimated_covariance_params_array))
fractional_diffusion_coefficient = optimized_parameter_value_array[0]
standard_deviation_fractional_diffusion_coefficient = parameter_standard_deviation_array[0]
alpha = optimized_parameter_value_array[1]
standard_deviation_alpha = parameter_standard_deviation_array[1]
return (fractional_diffusion_coefficient, standard_deviation_fractional_diffusion_coefficient, alpha, standard_deviation_alpha,sample_fitting_data_X_values_nanoseconds,sample_fitting_data_Y_values_Angstroms)
def fit_linear_diffusion_data(time_data_array,MSD_data_array,degrees_of_freedom=2):
'''The linear (i.e., normal, random-walk) MSD vs. time diffusion constant calculation.
The results are returned in a tuple.
Parameters
----------
time_data_array : array_like
Input array of time window sizes (nanosecond units)
MSD_data_array : array_like
Input array of MSD values (Angstrom ** 2 units; order matched to time_data_array)
degrees_of_freedom : int
The degrees of freedom for the diffusional process (1, 2 or 3; default 2)
Returns
-------
diffusion_constant
The linear (or normal, random-walk) diffusion coefficient (units of Angstrom ** 2 / ns)
diffusion_constant_error_estimate
The estimated uncertainty in the diffusion constant (units of Angstrom ** 2 / ns), calculated as the difference in the slopes of the two halves of the data. A similar approach is used by GROMACS g_msd [Hess2008]_.
sample_fitting_data_X_values_nanoseconds
An array of time window sizes (x values) that may be used to plot the linear fit
sample_fitting_data_Y_values_Angstroms
An array of MSD values (y values) that may be used to plot the linear fit
Raises
------
ValueError
If the time window and MSD arrays do not have the same shape
References
----------
.. [Hess2008] Hess et al. (2008) JCTC 4: 435-447.
Examples
--------
Calculate linear diffusion coefficient from artificial data (would typically obtain empirical data from an MD simulation trajectory):
>>> import diffusion_analysis
>>> import numpy
>>> artificial_time_values = numpy.arange(10)
>>> artificial_MSD_values = numpy.array([0.,1.,2.,2.2,3.6,4.7,5.8,6.6,7.0,6.9])
>>> results_tuple = diffusion_analysis.fit_linear_diffusion_data(artificial_time_values,artificial_MSD_values)
>>> D, D_error = results_tuple[0:2]
>>> print D, D_error
0.210606060606 0.07
Plot the linear fit data:
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> sample_fit_x_values, sample_fit_y_values = results_tuple[2:]
>>> p = plt.plot(sample_fit_x_values,sample_fit_y_values,'-',artificial_time_values,artificial_MSD_values,'.')
.. image:: example_linear.png
'''
if time_data_array.shape != MSD_data_array.shape:
raise ValueError("The shape of time_data_array must match the shape of MSD_data_array.")
coefficient_dictionary = {1:2.,2:4.,3:6.} #dictionary for mapping degrees_of_freedom to coefficient in fitting equation
coefficient = coefficient_dictionary[degrees_of_freedom]
x_data_array = time_data_array
y_data_array = MSD_data_array
z = numpy.polyfit(x_data_array,y_data_array,1)
slope, intercept = z
diffusion_constant = slope / coefficient
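# For normal (random-walk) diffusion MSD = 2*d*D*t, so dividing the fitted slope by
# the dimensionality coefficient (2, 4 or 6 for d = 1, 2 or 3) gives D directly.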
#estimate error in D constant using g_msd approach:
first_half_x_data, second_half_x_data = numpy.array_split(x_data_array,2)
first_half_y_data, second_half_y_data = numpy.array_split(y_data_array,2)
slope_first_half, intercept_first_half = numpy.polyfit(first_half_x_data,first_half_y_data,1)
slope_second_half, intercept_second_half = numpy.polyfit(second_half_x_data,second_half_y_data,1)
diffusion_constant_error_estimate = abs(slope_first_half - slope_second_half) / coefficient
#use poly1d object for polynomial calling convenience (to provide plotting fit data for user if they want to use it):
p = numpy.poly1d(z)
sample_fitting_data_X_values_nanoseconds = numpy.linspace(time_data_array[0],time_data_array[-1],100)
sample_fitting_data_Y_values_Angstroms = p(sample_fitting_data_X_values_nanoseconds)
return (diffusion_constant, diffusion_constant_error_estimate,sample_fitting_data_X_values_nanoseconds,sample_fitting_data_Y_values_Angstroms)
def centroid_array_production_protein(protein_sel,num_protein_copies):
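# (Added description) Splits the concatenated coordinates of all protein copies into
# per-copy arrays, averages each to a centroid, and returns them as
# {'protein': array_of_per_copy_centroids} so that each copy contributes one point
# to the subsequent MSD calculation.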
dictionary_centroid_arrays = {}
full_protein_coord_array = protein_sel.coordinates()
list_individual_protein_coord_arrays = numpy.split(full_protein_coord_array,num_protein_copies)
list_per_protein_centroids = [numpy.average(protein_coord_array,axis=0) for protein_coord_array in list_individual_protein_coord_arrays]
dictionary_centroid_arrays['protein'] = numpy.array(list_per_protein_centroids)
return dictionary_centroid_arrays
def mean_square_displacement_by_species(coordinate_file_path, trajectory_file_path, window_size_frames_list, dict_particle_selection_strings, contiguous_protein_selection=None, num_proteins=None, num_protein_copies = None):
'''Calculate the mean square displacement (MSD) of particles in a molecular dynamics simulation trajectory using the Python `MDAnalysis <http://code.google.com/p/mdanalysis/>`_ package [Michaud-Agrawal2011]_.
Parameters
----------
coordinate_file_path: str
Path to the coordinate file to be used in loading the trajectory.
trajectory_file_path: str or list of str
Path to the trajectory file to be used or ordered list of trajectory file paths.
window_size_frames_list: list
List of window sizes measured in frames. Time values are not used as timesteps and simulation output frequencies can vary.
dict_particle_selection_strings: dict
Dictionary of the MDAnalysis selection strings for each particle set for which MSD values will be calculated separately. Format: {'particle_identifier':'MDAnalysis selection string'}. If a given selection contains more than one particle then the centroid of the particles is used, and if more than one MDAnalysis residue object is involved, then the centroid of the selection within each residue is calculated separately.
contiguous_protein_selection: str
When parsing protein diffusion it may be undesirable to split by residue (i.e., amino acid). You may provide an MDAnalysis selection string for this parameter containing a single type of protein (and no other protein types or other molecules / atoms). This selection string may encompass multiple copies of the same protein which should be specified with the parameter `num_protein_copies`
num_protein_copies: int
The number of protein copies if contiguous_protein_selection is specified. This will allow for the protein coordinates to be split and the individual protein centroids will be used for diffusion analysis.
Returns
-------
dict_MSD_values: dict
Dictionary of mean square displacement data. Contains three keys: MSD_value_dict (MSD values, in Angstrom ** 2), MSD_std_dict (standard deviation of MSD values), frame_skip_value_list (the frame window sizes)
References
----------
.. [Michaud-Agrawal2011] N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein. MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations. J. Comput. Chem. 32 (2011), 2319–2327
Examples
--------
Extract MSD values from an artificial GROMACS xtc file (results measured in A**2 based on frame window sizes) containing only three amino acid residues.
>>> import diffusion_analysis
>>> import numpy
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> window_size_list_frames = [1,2,4,6]
>>> dict_particle_selection_strings = {'MET':'resname MET','ARG':'resname ARG','CYS':'resname CYS'}
>>> dict_MSD_values = diffusion_analysis.mean_square_displacement_by_species('./test_data/dummy.gro','./test_data/diffusion_testing.xtc',window_size_list_frames,dict_particle_selection_strings)
Plot the results:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> for residue_name in dict_particle_selection_strings.keys():
... p = ax.errorbar(window_size_list_frames,dict_MSD_values['MSD_value_dict'][residue_name],yerr=dict_MSD_values['MSD_std_dict'][residue_name],label=residue_name,fmt='o')
>>> a = ax.set_xlabel('Window size (frames)')
>>> a = ax.set_ylabel('MSD ($\AA^2$)')
>>> a = ax.set_ylim(-10,600)
>>> a = ax.legend(loc=2,numpoints=1)
.. image:: example_MSD_extraction.png
'''
import MDAnalysis
universe_object = MDAnalysis.Universe(coordinate_file_path,trajectory_file_path)
if not contiguous_protein_selection:
MDA_residue_selection_dictionary = {}
for particle_name, selection_string in dict_particle_selection_strings.iteritems():
MDA_selection = universe_object.selectAtoms(selection_string)
MDA_selection_residue_list = MDA_selection.residues #have to break it down by residues, otherwise would end up with centroid of all particles of a given name
list_per_residue_selection_objects = [residue.selectAtoms(selection_string) for residue in MDA_selection_residue_list] #the MDA selection objects PER residue
MDA_residue_selection_dictionary[particle_name] = list_per_residue_selection_objects
def centroid_array_production(current_MDA_selection_dictionary): #actually my modification of this for the github workflow won't work--it will find the centroid of ALL the particles with the same name
'''Produce numpy arrays of centroids organized in a dictionary by particle identifier and based on assignment of particle selections to unique residue objects within MDAnalysis.'''
dictionary_centroid_arrays = {}
for particle_name,list_per_residue_selection_objects in current_MDA_selection_dictionary.iteritems():
list_per_residue_selection_centroids = [residue_selection.centroid() for residue_selection in list_per_residue_selection_objects]
dictionary_centroid_arrays[particle_name] = numpy.array(list_per_residue_selection_centroids)
return dictionary_centroid_arrays
else: #dealing with proteins, where we don't want a breakdown by residue
protein_selection = universe_object.selectAtoms(contiguous_protein_selection)
dict_MSD_values = {'MSD_value_dict':{},'MSD_std_dict':{},'frame_skip_value_list':[]} #for overall storage of MSD average / standard deviation values for this replicate
for MSD_window_size_frames in window_size_frames_list:
list_per_window_average_displacements = []
counter = 0
trajectory_striding_dictionary = {} #store values in a dict as you stride through trajectory
for ts in universe_object.trajectory[::MSD_window_size_frames]:
if counter == 0: #first parsed frame
if not contiguous_protein_selection:
previous_frame_centroid_array_dictionary = centroid_array_production(MDA_residue_selection_dictionary)
else:
previous_frame_centroid_array_dictionary = centroid_array_production_protein(protein_selection, num_protein_copies)
else: #all subsequent frames
if not contiguous_protein_selection:
current_frame_centroid_array_dictionary = centroid_array_production(MDA_residue_selection_dictionary)
else:
current_frame_centroid_array_dictionary = centroid_array_production_protein(protein_selection, num_protein_copies)
for particle_name in current_frame_centroid_array_dictionary.keys():
if not particle_name in trajectory_striding_dictionary.keys(): #create the appropriate entry if this particle type hasn't been parsed yet
trajectory_striding_dictionary[particle_name] = {'MSD_value_list_centroids':[]}
current_delta_array_centroids = previous_frame_centroid_array_dictionary[particle_name] - current_frame_centroid_array_dictionary[particle_name]
square_delta_array_centroids = numpy.square(current_delta_array_centroids)
sum_squares_delta_array_centroids = numpy.sum(square_delta_array_centroids,axis=1)
trajectory_striding_dictionary[particle_name]['MSD_value_list_centroids'].append(numpy.average(sum_squares_delta_array_centroids))
#reset the value of the 'previous' array as you go along:
previous_frame_centroid_array_dictionary = current_frame_centroid_array_dictionary
#print 'frame:', ts.frame
counter += 1
for particle_name, MSD_data_subdictionary in trajectory_striding_dictionary.iteritems():
if not particle_name in dict_MSD_values['MSD_value_dict'].keys(): #initialize subdictionaries as needed
dict_MSD_values['MSD_value_dict'][particle_name] = []
dict_MSD_values['MSD_std_dict'][particle_name] = []
dict_MSD_values['MSD_value_dict'][particle_name].append(numpy.average(numpy.array(trajectory_striding_dictionary[particle_name]['MSD_value_list_centroids'])))
dict_MSD_values['MSD_std_dict'][particle_name].append(numpy.std(numpy.array(trajectory_striding_dictionary[particle_name]['MSD_value_list_centroids'])))
dict_MSD_values['frame_skip_value_list'].append(MSD_window_size_frames)
return dict_MSD_values
| mit |
amacd31/hydromet-toolkit | setup.py | 1 | 1316 | import os
from io import open
import versioneer
from setuptools import setup
setup(
name='hydromet',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Toolkit for manipulating and exploring hydrological and meteorological variables.',
author='Andrew MacDonald',
author_email='[email protected]',
license='BSD',
url='https://github.com/amacd31/hydromet-toolkit',
install_requires=['numpy', 'pandas'],
packages = ['hydromet'],
entry_points = {
'console_scripts': [
'hm = hydromet.console:main',
]
},
test_suite = 'tests',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/dtypes/test_missing.py | 6 | 12886 | # -*- coding: utf-8 -*-
import pytest
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.common import is_scalar
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isna, notna, isnull, notnull,
na_value_for_dtype)
@pytest.mark.parametrize('notna_f', [notna, notnull])
def test_notna_notnull(notna_f):
assert notna_f(1.)
assert not notna_f(None)
assert not notna_f(np.NaN)
with cf.option_context("mode.use_inf_as_na", False):
assert notna_f(np.inf)
assert notna_f(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notna_f(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_na", True):
assert not notna_f(np.inf)
assert not notna_f(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notna_f(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_na", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(notna_f(s), Series))
class TestIsNA(object):
def test_0d_array(self):
assert isna(np.array(np.nan))
assert not isna(np.array(0.0))
assert not isna(np.array(0))
# test object dtype
assert isna(np.array(np.nan, dtype=object))
assert not isna(np.array(0.0, dtype=object))
assert not isna(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isna(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('isna_f', [isna, isnull])
def test_isna_isnull(self, isna_f):
assert not isna_f(1.)
assert isna_f(None)
assert isna_f(np.NaN)
assert isna_f(float('nan'))
assert not isna_f(np.inf)
assert not isna_f(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isna_f(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isna_f(df)
expected = df.apply(isna_f)
tm.assert_frame_equal(result, expected)
# panel
with catch_warnings(record=True):
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isna_f(p)
expected = p.apply(isna_f)
tm.assert_panel_equal(result, expected)
# panel 4d
with catch_warnings(record=True):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isna_f(p)
expected = p.apply(isna_f)
tm.assert_panel4d_equal(result, expected)
def test_isna_lists(self):
result = isna([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isna([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isna(['foo', 'bar'])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isna([u('foo'), u('bar')])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
def test_isna_nat(self):
result = isna([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isna(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isna_numpy_nat(self):
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isna(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isna_datetime(self):
assert not isna(datetime.now())
assert notna(datetime.now())
idx = date_range('1/1/1990', periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notna(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isna(idx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq='M')
mask = isna(pidx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
mask = isna(pidx[1:])
exp = np.zeros(len(mask), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
@pytest.mark.parametrize(
"value, expected",
[(np.complex128(np.nan), True),
(np.float64(1), False),
(np.array([1, 1 + 0j, np.nan, 3]),
np.array([False, False, True, False])),
(np.array([1, 1 + 0j, np.nan, 3], dtype=object),
np.array([False, False, True, False])),
(np.array([1, 1 + 0j, np.nan, 3]).astype(object),
np.array([False, False, True, False]))])
def test_complex(self, value, expected):
result = isna(value)
if is_scalar(result):
assert result is expected
else:
tm.assert_numpy_array_equal(result, expected)
def test_datetime_other_units(self):
idx = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-02'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(idx), exp)
tm.assert_numpy_array_equal(notna(idx), ~exp)
tm.assert_numpy_array_equal(isna(idx.values), exp)
tm.assert_numpy_array_equal(notna(idx.values), ~exp)
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(values), exp)
tm.assert_numpy_array_equal(notna(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
def test_timedelta_other_units(self):
idx = pd.TimedeltaIndex(['1 days', 'NaT', '2 days'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(idx), exp)
tm.assert_numpy_array_equal(notna(idx), ~exp)
tm.assert_numpy_array_equal(isna(idx.values), exp)
tm.assert_numpy_array_equal(notna(idx.values), ~exp)
for dtype in ['timedelta64[D]', 'timedelta64[h]', 'timedelta64[m]',
'timedelta64[s]', 'timedelta64[ms]', 'timedelta64[us]',
'timedelta64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(values), exp)
tm.assert_numpy_array_equal(notna(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
def test_period(self):
idx = pd.PeriodIndex(['2011-01', 'NaT', '2012-01'], freq='M')
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isna(idx), exp)
tm.assert_numpy_array_equal(notna(idx), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(idx)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
s = pd.Series(idx, dtype=object)
tm.assert_series_equal(isna(s), exp)
tm.assert_series_equal(notna(s), ~exp)
def test_array_equivalent():
assert array_equivalent(np.array([np.nan, np.nan]),
np.array([np.nan, np.nan]))
assert array_equivalent(np.array([np.nan, 1, np.nan]),
np.array([np.nan, 1, np.nan]))
assert array_equivalent(np.array([np.nan, None], dtype='object'),
np.array([np.nan, None], dtype='object'))
assert array_equivalent(np.array([np.nan, 1 + 1j], dtype='complex'),
np.array([np.nan, 1 + 1j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1 + 1j], dtype='complex'), np.array(
[np.nan, 1 + 2j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan]))
assert not array_equivalent(
np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e']))
assert array_equivalent(Float64Index([0, np.nan]),
Float64Index([0, np.nan]))
assert not array_equivalent(
Float64Index([0, np.nan]), Float64Index([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan]),
DatetimeIndex([0, np.nan]))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]))
assert array_equivalent(TimedeltaIndex([0, np.nan]),
TimedeltaIndex([0, np.nan]))
assert not array_equivalent(
TimedeltaIndex([0, np.nan]), TimedeltaIndex([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan], tz='US/Eastern'),
DatetimeIndex([0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz='US/Eastern'), DatetimeIndex(
[1, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex(
[0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz='CET'), DatetimeIndex(
[0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan]))
def test_array_equivalent_compat():
# see gh-13388
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
assert (array_equivalent(m, n, strict_nan=True))
assert (array_equivalent(m, n, strict_nan=False))
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (4, 3)], dtype=[('a', int), ('b', float)])
assert (not array_equivalent(m, n, strict_nan=True))
assert (not array_equivalent(m, n, strict_nan=False))
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (3, 4)], dtype=[('b', int), ('a', float)])
assert (not array_equivalent(m, n, strict_nan=True))
assert (not array_equivalent(m, n, strict_nan=False))
def test_array_equivalent_str():
for dtype in ['O', 'S', 'U']:
assert array_equivalent(np.array(['A', 'B'], dtype=dtype),
np.array(['A', 'B'], dtype=dtype))
assert not array_equivalent(np.array(['A', 'B'], dtype=dtype),
np.array(['A', 'X'], dtype=dtype))
def test_na_value_for_dtype():
for dtype in [np.dtype('M8[ns]'), np.dtype('m8[ns]'),
DatetimeTZDtype('datetime64[ns, US/Eastern]')]:
assert na_value_for_dtype(dtype) is NaT
for dtype in ['u1', 'u2', 'u4', 'u8',
'i1', 'i2', 'i4', 'i8']:
assert na_value_for_dtype(np.dtype(dtype)) == 0
for dtype in ['bool']:
assert na_value_for_dtype(np.dtype(dtype)) is False
for dtype in ['f2', 'f4', 'f8']:
assert np.isnan(na_value_for_dtype(np.dtype(dtype)))
for dtype in ['O']:
assert np.isnan(na_value_for_dtype(np.dtype(dtype)))
| apache-2.0 |
jreeder/avoplot | src/avoplot/series.py | 3 | 19376 | #Copyright (C) Nial Peters 2013
#
#This file is part of AvoPlot.
#
#AvoPlot is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#AvoPlot is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with AvoPlot. If not, see <http://www.gnu.org/licenses/>.
import wx
import os
import numpy
from datetime import datetime
import math
import scipy.optimize
import collections
from avoplot.subplots import AvoPlotXYSubplot
from avoplot import controls
from avoplot import core
from avoplot import subplots
from avoplot import figure
from avoplot import fitting
from avoplot import data_selection
from avoplot.gui import linestyle_editor
from avoplot.persist import PersistentStorage
class DataSeriesBase(core.AvoPlotElementBase):
"""
Base class for all data series.
"""
def __init__(self, name):
super(DataSeriesBase, self).__init__(name)
self.__plotted = False
self._mpl_lines = []
def get_mpl_lines(self):
"""
Returns a list of matplotlib line objects associated with the data
series.
"""
assert self.__plotted, ('Data series must be plotted before you can '
'access the matplotlib lines')
return self._mpl_lines
def get_figure(self):
"""
Returns the AvoPlot figure (avoplot.figure.AvoPlotFigure) object that
the series is contained within, or None if the series does not yet
belong to a figure.
"""
#look up the list of parents recursively until we find a figure object
parent = self.get_parent_element()
while not isinstance(parent, figure.AvoPlotFigure):
if parent is None:
return None
parent = parent.get_parent_element()
#sanity check - there should always be a figure object somewhere
#in the ancestry of a series object.
if isinstance(parent, core.AvoPlotSession):
raise RuntimeError("Reached the root element before an "
"AvoPlotFigure instance was found.")
return parent
def get_subplot(self):
"""
Returns the AvoPlot subplot (subclass of
avoplot.subplots.AvoPlotSubplotBase) object that
the series is contained within, or None if the series does not yet
belong to a subplot.
"""
#look up the list of parents recursively until we find a figure object
parent = self.get_parent_element()
while not isinstance(parent, subplots.AvoPlotSubplotBase):
if parent is None:
return None
parent = parent.get_parent_element()
#sanity check - there should always be a figure object somewhere
#in the ancestry of a series object.
if isinstance(parent, core.AvoPlotSession):
raise RuntimeError("Reached the root element before an "
"AvoPlotFigure instance was found.")
return parent
def delete(self):
"""
Overrides the base class method in order to remove the line(s) from the
axes and draw the changes.
"""
self._mpl_lines.pop(0).remove()
self.update()
super(DataSeriesBase, self).delete()
def _plot(self, subplot):
"""
Called in subplot.add_data_series() to plot the data into the subplot
and setup the controls for the series (the parent of the series is not
known until it gets added to the subplot)
"""
assert not self.__plotted, ('plot() should only be called once')
self.__plotted = True
self._mpl_lines = self.plot(subplot)
#self.setup_controls(subplot.get_figure())
def add_subseries(self, series):
"""
Adds a series as a child of this series. Normally you would expect
series to be parented by subplots, however, for things like fit-lines
it makes more sense for them to be associated with the series that they
are fitting then the subplot that they are plotted in.
series must be an instance of avoplot.series.DataSeriesBase or subclass
thereof.
"""
assert isinstance(series, DataSeriesBase), ("Expecting series object of "
"type DataSeriesBase.")
series.set_parent_element(self)
series._plot(self.get_subplot())
def update(self):
"""
Redraws the series.
"""
subplot = self.get_subplot()
if subplot: #subplot could be None - in which case do nothing
subplot.update()
def plot(self, subplot):
"""
Plots the data series into the specified subplot (AvoPlotSubplotBase
instance) and returns the list of matplotlib lines associated with the
series. This method should be overridden by subclasses.
"""
return []
def preprocess(self, *args):
"""
Runs any preprocessing required on the data and returns it. This
should be overridden by subclasses.
"""
#return the data passed in unchanged
return args
def is_plotted(self):
"""
Returns True if the series has already been plotted. False otherwise.
"""
return self.__plotted
class XYDataSeries(DataSeriesBase):
"""
Class to represent 2D XY data series.
"""
def __init__(self, name, xdata=None, ydata=None):
super(XYDataSeries, self).__init__(name)
self.set_xy_data(xdata, ydata)
self.add_control_panel(XYSeriesControls(self))
self.add_control_panel(XYSeriesFittingControls(self))
@staticmethod
def get_supported_subplot_type():
"""
Static method that returns the class of subplot that the data series
can be plotted into. This will be a subclass of AvoPlotSubplotBase.
"""
return AvoPlotXYSubplot
def copy(self):
x,y = self.get_data()
return XYDataSeries(self.get_name(), xdata=x, ydata=y)
def set_xy_data(self, xdata=None, ydata=None):
"""
Sets the x and y values of the data series. Note that you need to call
the update() method to draw the changes to the screen. Note that xdata
and ydata may be masked arrays (numpy.ma.masked_array) but only the
unmasked values will be stored.
"""
if xdata is None and ydata is None:
xdata = numpy.array([])
ydata = numpy.array([])
elif xdata is None:
xdata = numpy.arange(len(ydata))
elif ydata is None:
ydata = numpy.arange(len(xdata))
else:
assert len(xdata) == len(ydata)
#if either of the arrays are masked - then skip the masked values
if numpy.ma.is_masked(xdata):
xmask = xdata.mask
else:
xmask = numpy.zeros(len(xdata))
if numpy.ma.is_masked(ydata):
ymask = ydata.mask
else:
ymask = numpy.zeros(len(ydata))
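#keep only the points at which neither the x nor the y value is masked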
data_mask = numpy.logical_not(numpy.logical_or(xmask, ymask))
data_idxs = numpy.where(data_mask)
self.__xdata = numpy.array(xdata)[data_idxs]
self.__ydata = numpy.array(ydata)[data_idxs]
if self.is_plotted():
#update the the data in the plotted line
line, = self.get_mpl_lines()
line.set_data(*self.preprocess(self.__xdata, self.__ydata))
def get_raw_data(self):
"""
Returns a tuple (xdata, ydata) of the raw data held by the series
(without any pre-processing operations performed). In general you should
use the get_data() method instead.
"""
return (self.__xdata, self.__ydata)
def get_length(self):
"""
Returns the number of data points in the series.
series.get_length() is equivalent to len(series.get_data()[0])
"""
return len(self.__xdata)
def get_data(self):
"""
Returns a tuple (xdata, ydata) of the data held by the series, with
any pre-processing operations applied to it.
"""
return self.preprocess(self.__xdata.copy(), self.__ydata.copy())
def preprocess(self, xdata, ydata):
"""
Runs any required preprocessing operations on the x and y data and
returns them.
"""
xdata, ydata = super(XYDataSeries, self).preprocess(xdata, ydata)
return xdata, ydata
def plot(self, subplot):
"""
plots the x,y data into the subplot as a line plot.
"""
return subplot.get_mpl_axes().plot(*self.get_data())
def export(self):
"""
Exports the selected data series. Called when user right clicks on the data series (see nav_panel.py).
"""
persistant_storage = PersistentStorage()
try:
last_path_used = persistant_storage.get_value("series_export_last_dir_used")
except KeyError:
last_path_used = ""
export_dialog = wx.FileDialog(None, message="Export data series as...",
defaultDir=last_path_used, defaultFile="AvoPlot Series.txt",
style=wx.SAVE|wx.FD_OVERWRITE_PROMPT, wildcard = "TXT files (*.txt)|*.txt")
if export_dialog.ShowModal() == wx.ID_OK:
path = export_dialog.GetPath()
persistant_storage.set_value("series_export_last_dir_used", os.path.dirname(path))
xdata, ydata = self.get_data()
with open(path, 'w') as fp:
if isinstance(xdata[0], datetime):
if isinstance(ydata[0], datetime):
for i in range(len(xdata)):
fp.write("%s\t%s\n" %(str(xdata[i]), str(ydata[i])))
else:
for i in range(len(xdata)):
fp.write("%s\t%f\n" %(str(xdata[i]), ydata[i]))
else:
if isinstance(ydata[0], datetime):
for i in range(len(xdata)):
fp.write("%f\t%s\n" %(xdata[i], str(ydata[i])))
else:
for i in range(len(xdata)):
fp.write("%f\t%f\n" %(xdata[i], ydata[i]))
export_dialog.Destroy()
class XYSeriesControls(controls.AvoPlotControlPanelBase):
"""
Control panel to allow user editing of data series (line styles,
colours etc.)
"""
def __init__(self, series):
super(XYSeriesControls, self).__init__("Series")
self.series = series
def setup(self, parent):
"""
Creates all the controls in the panel
"""
super(XYSeriesControls, self).setup(parent)
mpl_lines = self.series.get_mpl_lines()
#explicitly set the marker colour to its existing value, otherwise
#it will get changed if we change the line colour
mpl_lines[0].set_markeredgecolor(mpl_lines[0].get_markeredgecolor())
mpl_lines[0].set_markerfacecolor(mpl_lines[0].get_markerfacecolor())
#add line controls
line_ctrls_static_szr = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, 'Line'), wx.VERTICAL)
self.linestyle_ctrl_panel = linestyle_editor.LineStyleEditorPanel(self, mpl_lines, self.series.update)
line_ctrls_static_szr.Add(self.linestyle_ctrl_panel, 0, wx.ALIGN_TOP | wx.ALIGN_RIGHT)
#add the marker controls
marker_ctrls_static_szr = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, 'Markers'), wx.VERTICAL)
self.marker_ctrls_panel = linestyle_editor.MarkerStyleEditorPanel(self, mpl_lines, self.series.update)
marker_ctrls_static_szr.Add(self.marker_ctrls_panel, 0, wx.ALIGN_TOP | wx.ALIGN_RIGHT)
#add the controls to the control panel's internal sizer
self.Add(line_ctrls_static_szr,0,wx.EXPAND|wx.ALL, border=5)
self.Add(marker_ctrls_static_szr,0,wx.EXPAND|wx.ALL, border=5)
line_ctrls_static_szr.Layout()
def on_display(self):
self.marker_ctrls_panel.SendSizeEvent()
self.linestyle_ctrl_panel.SendSizeEvent()
class FitDataSeries(XYDataSeries):
def __init__(self, s, xdata, ydata, fit_params):
super(FitDataSeries, self).__init__(s.get_name() + ' Fit', xdata, ydata)
self.fit_params = fit_params
self.add_control_panel(FitParamsCtrl(self))
s.add_subseries(self)
@staticmethod
def get_supported_subplot_type():
return AvoPlotXYSubplot
class FitParamsCtrl(controls.AvoPlotControlPanelBase):
"""
Control panel to display the best fit parameters of a FitDataSeries
"""
def __init__(self, series):
#call the parent class's __init__ method, passing it the name that we
#want to appear on the control panels tab.
super(FitParamsCtrl, self).__init__("Fit Parameters")
#store the data series object that this control panel is associated with,
#so that we can access it later
self.series = series
self.fit_params = series.fit_params
def setup(self, parent):
super(FitParamsCtrl, self).setup(parent)
label_text = wx.StaticText(self, -1, self.fit_params[0][0]+':')
self.Add(label_text, 0, wx.ALIGN_TOP|wx.ALL,border=10)
for name, value in self.fit_params[1:]:
label_text = wx.StaticText(self, -1, ''.join([" ",name,": ","%0.3e"%value]))
self.Add(label_text, 0, wx.ALIGN_TOP|wx.ALL,border=5)
class XYSeriesFittingControls(controls.AvoPlotControlPanelBase):
def __init__(self, series):
super(XYSeriesFittingControls, self).__init__("Maths")
self.series = series
self.__current_tool_idx = 0
def setup(self, parent):
"""
Creates all the controls in the panel
"""
super(XYSeriesFittingControls, self).setup(parent)
data_selection_static_sizer = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, 'Data selection'), wx.VERTICAL)
self.selection_panel = data_selection.DataRangeSelectionPanel(self, self.series)
data_selection_static_sizer.Add(self.selection_panel,1, wx.EXPAND)
self.Add(data_selection_static_sizer, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, border=5)
fit_type_static_sizer = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, 'Fitting'), wx.VERTICAL)
self.fit_type = wx.Choice(self, wx.ID_ANY, choices=[ft.name for ft in fitting.get_fitting_tools()])
fit_type_static_sizer.Add(self.fit_type,1, wx.ALIGN_RIGHT)
fit_button = wx.Button(self, -1, "Fit")
fit_type_static_sizer.Add(fit_button, 0, wx.ALIGN_BOTTOM | wx.ALIGN_CENTER_HORIZONTAL)
self.Add(fit_type_static_sizer, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, border=5)
wx.EVT_BUTTON(self, fit_button.GetId(), self.on_fit)
wx.EVT_CHOICE(self, self.fit_type.GetId(), self.on_tool_choice)
stats_static_sizer = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, 'Statistics'), wx.VERTICAL)
self.samples_txt = wx.StaticText(self, wx.ID_ANY, "\tNum. Samples:")
self.mean_txt = wx.StaticText(self, wx.ID_ANY, "\tMean:")
self.stddev_txt = wx.StaticText(self, wx.ID_ANY, "\tStd. Dev.:")
self.min_txt = wx.StaticText(self, wx.ID_ANY, "\tMin. Value:")
self.max_txt = wx.StaticText(self, wx.ID_ANY, "\tMax. Value:")
stats_static_sizer.Add(self.samples_txt, 0, wx.ALIGN_LEFT)
stats_static_sizer.Add(self.mean_txt, 0, wx.ALIGN_LEFT)
stats_static_sizer.Add(self.stddev_txt, 0, wx.ALIGN_LEFT)
stats_static_sizer.Add(self.min_txt, 0, wx.ALIGN_LEFT)
stats_static_sizer.Add(self.max_txt, 0, wx.ALIGN_LEFT)
self.calc_button = wx.Button(self, wx.ID_ANY, "Calculate")
stats_static_sizer.Add(self.calc_button, 0, wx.ALIGN_CENTER_HORIZONTAL)
self.Add(stats_static_sizer, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, border=5)
wx.EVT_BUTTON(self, self.calc_button.GetId(), self.on_calculate)
self.span = None
def on_calculate(self, evnt):
mask = self.selection_panel.get_selection()
selected_idxs = numpy.where(mask)
raw_x, raw_y = self.series.get_data()
n_samples = len(selected_idxs[0])
self.samples_txt.SetLabel("\tNum. Samples: %d"%n_samples)
if n_samples > 0: #if not an empty selection
self.mean_txt.SetLabel("\tMean: %e"%numpy.mean(raw_y[selected_idxs]))
self.stddev_txt.SetLabel("\tStd. Dev.: %e"%numpy.std(raw_y[selected_idxs]))
self.min_txt.SetLabel("\tMin. Value: %e"%numpy.min(raw_y[selected_idxs]))
self.max_txt.SetLabel("\tMax. Value: %e"%numpy.max(raw_y[selected_idxs]))
else:
self.mean_txt.SetLabel("\tMean:")
self.stddev_txt.SetLabel("\tStd. Dev.:")
self.min_txt.SetLabel("\tMin. Value:")
self.max_txt.SetLabel("\tMax. Value:")
def on_tool_choice(self, evnt):
self.__current_tool_idx = self.fit_type.GetCurrentSelection()
def on_fit(self, evnt):
mask = self.selection_panel.get_selection()
selected_idxs = numpy.where(mask)
raw_x, raw_y = self.series.get_data()
fitting_tool = fitting.get_fitting_tools()[self.__current_tool_idx]
fit_x_data, fit_y_data, fit_params = fitting_tool.fit(raw_x[selected_idxs],
raw_y[selected_idxs])
FitDataSeries(self.series, fit_x_data, fit_y_data, fit_params)
self.series.update()
def on_control_panel_active(self):
"""
This gets called automatically when the control panel is selected.
"""
self.selection_panel.enable_selection()
def on_control_panel_inactive(self):
"""
This gets called automatically when the control panel is un-selected.
"""
self.selection_panel.disable_selection()
| gpl-3.0 |
spallavolu/scikit-learn | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example visually illustrates, in the feature space, a comparison of
the results obtained with two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process, 2 student T with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
maciekcc/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py | 62 | 9268 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
return {
key: val.tolist() if isinstance(val, np.ndarray) else val
for key, val in a.items()
}
class _FeedingFunctionsTestCase(test.TestCase):
"""Tests for feeding functions."""
def testArrayFeedFnBatchOne(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 16
expected = {
"index_placeholder": [i],
"value_placeholder": [[2 * i, 2 * i + 1]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchFive(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [15, 0, 1, 2, 3],
"value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchTwoWithOneEpoch(self):
array = np.arange(5) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"value_placeholder": [10, 11]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"value_placeholder": [12, 13]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"value_placeholder": [14]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundred(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 100)
expected = {
"index_placeholder":
list(range(0, 16)) * 6 + list(range(0, 4)),
"value_placeholder":
np.arange(32).reshape([16, 2]).tolist() * 6 +
[[0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundredWithSmallerArrayAndMultipleEpochs(self):
array = np.arange(2) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"value_placeholder": [10, 11, 10, 11],
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOne(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 32
expected = {
"index_placeholder": [i + 96],
"a_placeholder": [32 + i],
"b_placeholder": [64 + i]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchFive(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [127, 96, 97, 98, 99],
"a_placeholder": [63, 32, 33, 34, 35],
"b_placeholder": [95, 64, 65, 66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchTwoWithOneEpoch(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 37)
array2 = np.arange(64, 69)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 101))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [96, 97],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [98, 99],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [100],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundred(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 100)
expected = {
"index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
"a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
"b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundredWithSmallDataArrayAndMultipleEpochs(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 34)
array2 = np.arange(64, 66)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 98))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [96, 97, 96, 97],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnBatchTwoWithOneEpoch(self):
a = np.arange(32, 37)
b = np.arange(64, 69)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnLargeBatchWithSmallArrayAndMultipleEpochs(self):
a = np.arange(32, 34)
b = np.arange(64, 66)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
if __name__ == "__main__":
test.main()
| apache-2.0 |
tareqmalas/girih | scripts/sisc/paper_plot_increasing_grid_size.py | 2 | 6905 | #!/usr/bin/env python
def main():
import sys
raw_data = load_csv(sys.argv[1])
k_l = set()
for k in raw_data:
k_l.add(get_stencil_num(k))
k_l = list(k_l)
for k in k_l:
for is_dp in [1]:
for t in [0, 1]:
plot_lines(raw_data, k, is_dp, t)
def get_stencil_num(k):
    # add the stencil operator (note: the `in` checks below are substring
    # membership tests on the coefficients label, not equality comparisons)
if k['Stencil Kernel coefficients'] in 'constant':
if int(k['Stencil Kernel semi-bandwidth'])==4:
stencil = 0
else:
stencil = 1
elif 'no-symmetry' in k['Stencil Kernel coefficients']:
stencil = 5
elif 'sym' in k['Stencil Kernel coefficients']:
if int(k['Stencil Kernel semi-bandwidth'])==1:
stencil = 3
else:
stencil = 4
else:
stencil = 2
return stencil
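# Mapping assumed elsewhere in this script (inferred from the plot titles set
# in plot_lines() below): 0 -> 25-point constant-coefficient,
# 1 -> 7-point constant-coefficient, 4 -> 25-point variable-coefficient,
# 5 -> 7-point variable-coefficient.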
def plot_lines(raw_data, stencil_kernel, is_dp, t):
from operator import itemgetter
import matplotlib.pyplot as plt
import matplotlib
import pylab
from pylab import arange,pi,sin,cos,sqrt
m = 3.0
fig_width = 4.0*0.393701*m # inches
fig_height = 1.0*fig_width #* 210.0/280.0#433.62/578.16
fig_size = [fig_width,fig_height]
params = {
'axes.labelsize': 6*m,
'axes.linewidth': 0.25*m,
'lines.linewidth': 0.75*m,
'text.fontsize': 7*m,
'legend.fontsize': 5*m,
'xtick.labelsize': 6*m,
'ytick.labelsize': 6*m,
'lines.markersize': 1,
'text.usetex': True,
'figure.figsize': fig_size}
pylab.rcParams.update(params)
ts_l = set()
for k in raw_data:
ts_l.add(k['Time stepper orig name'])
ts_l = list(ts_l)
th = set()
for k in raw_data:
th.add(int(k['OpenMP Threads']))
th = list(th)
tb_l = set()
for k in raw_data:
tb_l.add(k['Time unroll'])
tb_l = list(tb_l)
tb_l = map(int,tb_l)
tb_l.sort()
tgs_l = set()
for k in raw_data:
tgs_l.add(k['Thread group size'])
tgs_l = list(tgs_l)
tgs_l = map(int,tgs_l)
tgs_l.sort()
req_fields = [('Thread group size', int), ('WD main-loop RANK0 MStencil/s MAX', float), ('Time stepper orig name', str), ('OpenMP Threads', int), ('MStencil/s MAX', float), ('Time unroll',int)]
req_fields = req_fields + [ ('Sustained Memory BW', float)]
req_fields = req_fields + [('Local NX', int), ('Local NY', int), ('Local NZ', int)]
data = []
for k in raw_data:
tup = {}
# add the general fileds
for f in req_fields:
try:
tup[f[0]] = map(f[1], [k[f[0]]] )[0]
except:
print("ERROR: results entry missing essential data")
print k
return
# add the stencil operator
tup['stencil'] = get_stencil_num(k)
# add the precision information
if k['Precision'] in 'DP':
p = 1
else:
p = 0
tup['Precision'] = p
data.append(tup)
data = sorted(data, key=itemgetter('Time stepper orig name', 'Thread group size', 'Local NY'))
#for i in data: print i
max_single = 0
# fig, ax1 = plt.subplots()
# lns = []
markers = 'o^v*x'
marker_s = 7
line_w = 1
line_s = '-'
x = []
y = []
y_m = []
max_x = 0
max_y = 0
for k in data:
if ( ('Naive' in k['Time stepper orig name']) and (k['stencil']==stencil_kernel) and (k['Precision']==is_dp)):
y_m.append(k['Sustained Memory BW']/10**3)
x.append(k['Local NY'])
y.append(k['MStencil/s MAX']/10**3)
marker = markers[0]
col = 'g'
ts2 = 'Spt.blk.'
if(x) and t==0:
plt.plot(x, y, color=col, marker=marker, markersize=marker_s, linestyle=line_s, linewidth=line_w, label=ts2)
max_y = max(y)
max_x = max(x)
if(y_m) and t==1:
plt.plot(x, y_m, color=col, marker=marker, markersize=marker_s, linestyle=line_s, linewidth=line_w, label=ts2)
perf_str = 'WD main-loop RANK0 MStencil/s MAX'
cols = {0:'y', 1:'k', 2:'b', 5:'c', 10:'m'}
for idx, tgs in enumerate([1,2,5, 10]):
marker = markers[1+idx]
x = []
y = []
y_m = []
for k in data:
if ( ('Diamond' in k['Time stepper orig name']) and (k['Thread group size'] == tgs) and (k['stencil']==stencil_kernel) and (k['Precision']==is_dp) ):
y_m.append(k['Sustained Memory BW']/10**3)
x.append(k['Local NY'])
y.append(k[perf_str]/10**3)
col = cols[tgs]
ts2 = str(tgs) + 'WD'
if(x) and t==0:
plt.plot(x, y, color=col, marker=marker, markersize=marker_s, linestyle=line_s, linewidth=line_w, label=ts2)
max_y = max(max(y), max_y)
max_x = max(max(x), max_x)
if(y_m) and t==1:
plt.plot(x, y_m, color=col, marker=marker, markersize=marker_s, linestyle=line_s, linewidth=line_w, label=ts2)
# add limits
sus_mem_bw = 40.0 #IB
# divide by streams number
if stencil_kernel == 0:
mem_limit = sus_mem_bw/4.0
elif stencil_kernel == 1:
mem_limit = sus_mem_bw/3.0
elif stencil_kernel == 2:
mem_limit = sus_mem_bw/5.0
elif stencil_kernel == 3:
mem_limit = sus_mem_bw/6.0
elif stencil_kernel == 4:
mem_limit = sus_mem_bw/16.0
elif stencil_kernel == 5:
mem_limit = sus_mem_bw/10.0
# divide by word size
if is_dp == 1:
mem_limit = mem_limit/8.0
else:
mem_limit = mem_limit/4.0
if t == 0:
plt.plot([1, max_x], [mem_limit, mem_limit], color='.2', linestyle='--', label='Spt.lim.')
if t == 0:
title = '_strongscaling_perf' #'Strong scaling performance'
if stencil_kernel == 1: plt.ylabel('GLUP/s')
else:
title = '_strongscaling_bw' #'Strong scaling main memory BW usage'
if stencil_kernel == 1: plt.ylabel('GBytes/s')
if stencil_kernel == 0:
title = '25_pt_const' + title
elif stencil_kernel == 1:
title = '7_pt_const' + title
elif stencil_kernel == 4:
title = '25_pt_var' + title
elif stencil_kernel == 5:
title = '7_pt_var' + title
f_name = title.replace(' ', '_')
if t==0: plt.ylim([0,max_y*1.1])
plt.xlabel('Size in each dimension')
if t == 0 and stencil_kernel == 1: plt.legend(loc='best')
plt.grid()
pylab.savefig(f_name+'.png', bbox_inches="tight", pad_inches=0.04)
pylab.savefig(f_name+'.pdf', format='pdf', bbox_inches="tight", pad_inches=0)
#plt.show()
plt.clf()
def load_csv(data_file):
from csv import DictReader
with open(data_file, 'rb') as output_file:
data = DictReader(output_file)
data = [k for k in data]
return data
if __name__ == "__main__":
main()
| bsd-3-clause |
rsivapr/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 8 | 1673 | """
==========================
SGD: Convex Loss Functions
==========================
An example that compares various convex loss functions.
All of the above loss functions are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.linear_model.sgd_fast import SquaredHinge
from sklearn.linear_model.sgd_fast import Hinge
from sklearn.linear_model.sgd_fast import ModifiedHuber
from sklearn.linear_model.sgd_fast import SquaredLoss
###############################################################################
# Define loss functions
xmin, xmax = -4, 4
hinge = Hinge(1)
squared_hinge = SquaredHinge()
perceptron = Hinge(0)
log_loss = lambda z, p: np.log2(1.0 + np.exp(-z))
modified_huber = ModifiedHuber()
squared_loss = SquaredLoss()
###############################################################################
# Plot loss functions
xx = np.linspace(xmin, xmax, 100)
pl.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
pl.plot(xx, [hinge.loss(x, 1) for x in xx], 'g-',
label="Hinge loss")
pl.plot(xx, [perceptron.loss(x, 1) for x in xx], 'm-',
label="Perceptron loss")
pl.plot(xx, [log_loss(x, 1) for x in xx], 'r-',
label="Log loss")
#pl.plot(xx, [2 * squared_loss.loss(x, 1) for x in xx], 'c-',
# label="Squared loss")
pl.plot(xx, [squared_hinge.loss(x, 1) for x in xx], 'b-',
label="Squared hinge loss")
pl.plot(xx, [modified_huber.loss(x, 1) for x in xx], 'y--',
label="Modified huber loss")
pl.ylim((0, 8))
pl.legend(loc="upper right")
pl.xlabel(r"$y \cdot f(x)$")
pl.ylabel("$L(y, f(x))$")
pl.show()
| bsd-3-clause |
MartinSavc/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
sanjuroj/bioscripts | probation/venn.py | 1 | 2076 | #!/bin/env python2.7
import sys
import os
import argparse
from matplotlib import pyplot as plt
import numpy as np
from matplotlib_venn import venn3, venn3_circles
def main():
args=processArgs()
plt.figure(figsize=(4,4))
v = venn3(subsets=(1, 1, 1, 1, 1, 1, 1), set_labels = ('GenomicHit', 'NoGenomicHit', 'JillPipeline'))
v.get_patch_by_id('100').set_alpha(1.0)
v.get_patch_by_id('100').set_color('white')
v.get_label_by_id('100').set_text('Unknown')
v.get_label_by_id('A').set_text('Set "A"')
#c = venn3_circles(subsets=(1, 1, 1, 1, 1, 1, 1), linestyle='dashed')
#c[0].set_lw(1.0)
#c[0].set_ls('dotted')
plt.title("Sample Venn diagram")
plt.annotate('Unknown set', xy=v.get_label_by_id('100').get_position() - np.array([0, 0.05]), xytext=(-70,-70),
ha='center', textcoords='offset points', bbox=dict(boxstyle='round,pad=0.5', fc='gray', alpha=0.1),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.5',color='gray'))
plt.show()
def processArgs():
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('\nError: %s\n\n' % message)
self.print_help()
sys.exit(2)
class Checkerrors(argparse.Action) :
def __call__(self,parser,namespace,value,option_string) :
if (option_string==None) :
if (os.path.isfile(value)==False) :
parser.error("The -f flag needs a valid filename")
else :
setattr(namespace,self.dest,value)
#argParser = MyParser(usage=("%s (sourceDir & filter) | filterFile" % (os.path.basename(sys.argv[0]))))
argParser = MyParser(description="""CHANGEME
""")
argParser.add_argument('file', metavar="", action=Checkerrors, help="Changeme")
ap=argParser.parse_args()
return ap
#This is required because by default this is a module. Running this makes it execute main as if it is a script
if __name__ == '__main__':
main()
| mit |
dtiarks/ThesisPlot | Chap2/Hopping/hopping.py | 1 | 3994 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 20 16:34:55 2017
@author: daniel
"""
import matplotlib.pyplot as plt
from scipy.optimize import newton
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import newton
import numpy as np
import datetime
from scipy.integrate import odeint
from scipy.misc import derivative
### PARAMETERS AND CONSTANTS
hbar = 6.626070040e-34/(2 * np.pi) # Js, Planck constant, CODATA 2014
rho_peak = 2.0e12/1e-6 # peak density in cm^-3/centi^-3
d = 2.534e-29 # Cm, dipole matrix element (D. A. Steck)
Gamma_e = 2*np.pi * 6.065e6 # decay rate (D. A. Steck)
epsilon_0 = 8.854187817e-12 # dielectric constant, CODATA 2014
L = 60e-6 # medium length in m
omega_s = 2*np.pi * 384.23e12 # rad/s, transition frequency
c = 299792458 # m/s, speed of light CODATA 2014
a0 = 0.52917721067e-10 # m, Bohr radius
C6 = 2.3e23 * 4.36e-18 * a0**6 # Jm^6, Van-der-Waals coefficient for the 67s - 69s
# interaction potential V=-C6/R^6, converted from atomic units with the Hartree
# energy and the Bohr radius
# the parameters, all in units of Gamma_3
Delta_c = 2*np.pi*15.0*10**6/Gamma_e
gamma_21 = 0.0577314
Omega_c = 2*np.pi*8.*10**6/Gamma_e
def susceptibility(Delta_s, Delta_c, gamma_21, Omega_c, ladder=True):
delta = (Delta_s + (-1 + 2*int(ladder)) * Delta_c) # two photon detuning
return 1j*(gamma_21 - 2j * delta)/(np.abs(Omega_c)**2 + (1 - 2j * Delta_s)*(gamma_21 - 2j * delta))
chi_0 = 2*rho_peak*d**2 / (epsilon_0*hbar*Gamma_e) # prefactor of the susceptibility for the cycling transition (|R> polarization)
intersection = newton(lambda x: np.imag(susceptibility(x, Delta_c, gamma_21, Omega_c) - susceptibility(x, Delta_c, gamma_21, 0)), -Delta_c)
def vdW_pot(r, r0):
return -C6 * (r-r0)**-6
def index(det):
chi=chi_0 * susceptibility(intersection, det, gamma_21, Omega_c)
n=np.sqrt(1+np.real(chi)) # index of refraction
return n
def group_velocity(d_c):
    '''Group velocity in meters/second for the control-field detuning d_c
    (in units of Gamma_e), computed from the numerical dispersion of the
    refractive index.'''
d_o = Delta_c*0.01
n=index(d_c)
dn=derivative(index, d_c, dx=d_o)
v_gr = c/(n + omega_s * dn)
    return v_gr  # formula from Fleischhauer's Rev. Mod. Phys. 2005
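# For reference, the relation used above is the standard slow-light
# expression from Fleischhauer's Rev. Mod. Phys. 2005 review,
#     v_gr = c / (n + omega * dn/domega),
# with the dispersion dn evaluated numerically via scipy's derivative();
# note that all detunings in this script are expressed in units of Gamma_e.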
# calculate the intersection of the imaginary parts
#print "differential phase shift over whole cloud at equal absorption:"
#print omega_s/(2*c) * L * chi_0 *np.real(susceptibility(intersection, Delta_c, gamma_21, Omega_c)- susceptibility(intersection, Delta_c, gamma_21, 0))
# calculate the transmission and phase curves
detuning = np.linspace(-4.5, -0.5, 400)
R0=-L/2
t=np.linspace(0,0.63e-6*Gamma_e,50000)
def func(R,ts):
d_c = Delta_c - vdW_pot(R, 0)/(hbar*Gamma_e)
d_o = (detuning[1]-detuning[0]) * Gamma_e
d_o = Delta_c*0.01
n=index(d_c)
dn=derivative(index, d_c, dx=d_o)
v_gr = c/(n + omega_s * dn)
return v_gr
Rs=odeint(func,R0,t)
D_Ep=5e6
def PRR(tss,Rss):
# v=np.array(vdW_pot(Rss,0)/(hbar))[:,0]
v=np.array(vdW_pot(Rss,0)/(hbar))
A=np.array(v**2/(v**2+D_Ep**2))
Om=2*np.pi*np.sqrt(v**2+D_Ep**2)
P=A*np.sin(0.5*Om*tss)**2
return P
fig=plt.figure(0,figsize=(9,10))
plt.suptitle("Hopping dynamics",fontsize=15, fontweight='bold')
ax1=fig.add_subplot(211)
ax1.plot(1e6*t/Gamma_e,1e6*Rs,label='w/ excitation')
ax1.plot(1e6*t/Gamma_e,1e6*group_velocity(Delta_c)*t+1e6*R0,label='w/o excitation')
ax1.axhline(-1e6*R0,c='k',label='Medium border')
ax1.axhline(1e6*R0,c='k')
ax1.axhline(-15,c='r',label=u'$R_b=15\mu m$')
ax1.axhline(15,c='r')
ax1.set_ylabel("Distance (um)")
ax1.set_xlabel("Time (us)")
ax1.legend(loc=2)
ax2=fig.add_subplot(212)
#ax2.plot(1e6*t/Gamma_e,PRR(t/Gamma_e,Rs))
RbMask=np.abs(Rs)>15e-6
Rbs=Rs[RbMask]
ax2.plot(1e6*Rbs,PRR(np.array(t)[RbMask[:,0]]/Gamma_e,Rbs))
#ax2.axvline(0.0814,c='r')
#ax2.axvline(-0.083,c='r')
ax2.set_ylabel("Population reversed state")
ax2.set_xlabel("Distance travalled (um)")
plt.savefig("hopping.pdf")
plt.show()
| mit |
keir-rex/zipline | tests/finance/test_slippage.py | 32 | 18400 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for finance.slippage
"""
import datetime
import pytz
from unittest import TestCase
from nose_parameterized import parameterized
import pandas as pd
from zipline.finance.slippage import VolumeShareSlippage
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.finance.blotter import Order
class SlippageTestCase(TestCase):
def test_volume_share_slippage(self):
event = Event(
{'volume': 200,
'type': 4,
'price': 3.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'open': 3.0}
)
slippage_model = VolumeShareSlippage()
open_orders = [
Order(dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
amount=100,
filled=0,
sid=133)
]
orders_txns = list(slippage_model.simulate(
event,
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
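        # Where the expected fill price comes from (assuming zipline's
        # default VolumeShareSlippage parameters, volume_limit=0.25 and
        # price_impact=0.1): only 0.25 * 200 = 50 shares can fill in this
        # bar, the volume share is 50 / 200 = 0.25, the simulated impact is
        # 0.1 * 0.25 ** 2 = 0.00625, and a buy fills at
        # 3.0 * (1 + 0.00625) = 3.01875.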
expected_txn = {
'price': float(3.01875),
'dt': datetime.datetime(
2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'amount': int(50),
'sid': int(133),
'commission': None,
'type': DATASOURCE_TYPE.TRANSACTION,
'order_id': open_orders[0].id
}
self.assertIsNotNone(txn)
# TODO: Make expected_txn an Transaction object and ensure there
# is a __eq__ for that class.
self.assertEquals(expected_txn, txn.__dict__)
def test_orders_limit(self):
events = self.gen_trades()
slippage_model = VolumeShareSlippage()
# long, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'limit': 3.6})
]
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 1)
txn = orders_txns[0][1]
expected_txn = {
'price': float(3.500875),
'dt': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'amount': int(100),
'sid': int(133),
'order_id': open_orders[0].id
}
self.assertIsNotNone(txn)
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
# short, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
expected_txn = {}
self.assertEquals(len(orders_txns), 0)
# short, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# short, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'limit': 3.4})
]
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.499125),
'dt': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'amount': int(-100),
'sid': int(133)
}
self.assertIsNotNone(txn)
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
STOP_ORDER_CASES = {
# Stop orders can be long/short and have their price greater or
# less than the stop.
#
# A stop being reached is conditional on the order direction.
# Long orders reach the stop when the price is greater than the stop.
# Short orders reach the stop when the price is less than the stop.
#
# Which leads to the following 4 cases:
#
        #                 | long  | short |
        # | price > stop  |       |       |
        # | price < stop  |       |       |
        #
        # Currently the slippage module acts according to the following table,
        # where 'X' represents triggering a transaction
        #                 | long  | short |
        # | price > stop  |       |   X   |
        # | price < stop  |   X   |       |
        #
        # However, the following behavior *should* be followed.
        #
        #                 | long  | short |
        # | price > stop  |   X   |       |
        # | price < stop  |       |   X   |
        #
        # (An illustrative sketch of the intended rule,
        # _intended_stop_reached, follows this STOP_ORDER_CASES definition.)
'long | price gt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 3.5
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 4.0,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 4.0,
'open': 3.5
},
'expected': {
'transaction': {
'price': 4.001,
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'amount': 100,
'sid': 133,
}
}
},
'long | price lt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 3.6
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.5,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.5,
'open': 4.0
},
'expected': {
'transaction': None
}
},
'short | price gt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.4
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.5,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.5,
'open': 3.0
},
'expected': {
'transaction': None
}
},
'short | price lt stop': {
'order': {
'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.5
},
'event': {
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'volume': 2000,
'price': 3.0,
'high': 3.15,
'low': 2.85,
'sid': 133,
'close': 3.0,
'open': 3.0
},
'expected': {
'transaction': {
'price': 2.99925,
'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'),
'amount': -100,
'sid': 133,
}
}
},
}
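    # The helper below is an illustrative sketch only (hypothetical, not part
    # of zipline's API and not exercised by these tests). It encodes the
    # *intended* trigger rule from the table above.
    @staticmethod
    def _intended_stop_reached(price, stop, amount):
        # long orders (amount > 0) should trigger once the price rises above
        # the stop; short orders once it falls below the stop.
        return price > stop if amount > 0 else price < stop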
@parameterized.expand([
(name, case['order'], case['event'], case['expected'])
for name, case in STOP_ORDER_CASES.items()
])
def test_orders_stop(self, name, order_data, event_data, expected):
order = Order(**order_data)
event = Event(initial_values=event_data)
slippage_model = VolumeShareSlippage()
try:
_, txn = next(slippage_model.simulate(event, [order]))
except StopIteration:
txn = None
if expected['transaction'] is None:
self.assertIsNone(txn)
else:
self.assertIsNotNone(txn)
for key, value in expected['transaction'].items():
self.assertEquals(value, txn[key])
def test_orders_stop_limit(self):
events = self.gen_trades()
slippage_model = VolumeShareSlippage()
# long, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 4.0,
'limit': 3.0})
]
orders_txns = list(slippage_model.simulate(
events[2],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 4.0,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[2],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# long, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': 100,
'filled': 0,
'sid': 133,
'stop': 4.0,
'limit': 3.6})
]
orders_txns = list(slippage_model.simulate(
events[2],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[3],
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.500875),
'dt': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'amount': int(100),
'sid': int(133)
}
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
# short, does not trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.0,
'limit': 4.0})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# short, does not trade - impacted price worse than limit price
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.0,
'limit': 3.5})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 0)
# short, does trade
open_orders = [
Order(**{
'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc),
'amount': -100,
'filled': 0,
'sid': 133,
'stop': 3.0,
'limit': 3.4})
]
orders_txns = list(slippage_model.simulate(
events[0],
open_orders
))
self.assertEquals(len(orders_txns), 0)
orders_txns = list(slippage_model.simulate(
events[1],
open_orders
))
self.assertEquals(len(orders_txns), 1)
_, txn = orders_txns[0]
expected_txn = {
'price': float(3.499125),
'dt': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'amount': int(-100),
'sid': int(133)
}
for key, value in expected_txn.items():
self.assertEquals(value, txn[key])
def gen_trades(self):
# create a sequence of trades
events = [
Event({
'volume': 2000,
'type': 4,
'price': 3.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 31, tzinfo=pytz.utc),
'open': 3.0
}),
Event({
'volume': 2000,
'type': 4,
'price': 3.5,
'datetime': datetime.datetime(
2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.5,
'dt':
datetime.datetime(2006, 1, 5, 14, 32, tzinfo=pytz.utc),
'open': 3.0
}),
Event({
'volume': 2000,
'type': 4,
'price': 4.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 33, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 4.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 33, tzinfo=pytz.utc),
'open': 3.5
}),
Event({
'volume': 2000,
'type': 4,
'price': 3.5,
'datetime': datetime.datetime(
2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.5,
'dt':
datetime.datetime(2006, 1, 5, 14, 34, tzinfo=pytz.utc),
'open': 4.0
}),
Event({
'volume': 2000,
'type': 4,
'price': 3.0,
'datetime': datetime.datetime(
2006, 1, 5, 14, 35, tzinfo=pytz.utc),
'high': 3.15,
'low': 2.85,
'sid': 133,
'source_id': 'test_source',
'close': 3.0,
'dt':
datetime.datetime(2006, 1, 5, 14, 35, tzinfo=pytz.utc),
'open': 3.5
})
]
return events
| apache-2.0 |
peterwilletts24/Python-Scripts | plot_scripts/EMBRACE/heat_flux/plot_from_pp_3234_diff_12km.py | 1 | 5598 | """
Load pp, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.analysis.cartography
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import scipy.interpolate
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
experiment_ids = ['dklwu']
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/unrotate_pole.py')
pp_file = '3234_mean'
degs_crop_top = 3.7
degs_crop_bottom = 3.5
degs_crop_left = 2
degs_crop_right = 3
min_contour = -50
max_contour = 50
tick_interval=20
#
# cmap= cm.s3pcpn_l
divisor=10 # for lat/lon rounding
def main():
# Load diff cube
gl = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/dklz/dklzq/%s.pp' % pp_file
glob = iris.load_cube(gl)
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
pfile = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file)
pcube = iris.load_cube(pfile)
lat = pcube.coord('grid_latitude').points
lon = pcube.coord('grid_longitude').points
cs = pcube.coord_system('CoordSystem')
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print ' %s - Unrotate pole %s' % (experiment_id,cs)
lons, lats = np.meshgrid(lon, lat)
lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon=lons[0]
lat=lats[:,0]
#pcube.remove_coord('grid_latitude')
#pcube.remove_coord('grid_longitude')
#pcube.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord)
#pcube.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord)
lon_min=np.min(lon)
lon_max=np.max(lon)
lon_low_tick=lon_min -(lon_min%divisor)
lon_high_tick=math.ceil(lon_max/divisor)*divisor
lat_min=np.min(lat)
lat_max=np.max(lat)
lat_low_tick=lat_min - (lat_min%divisor)
lat_high_tick=math.ceil(lat_max/divisor)*divisor
pcubediff=pcube-glob
plt.figure(figsize=(8,8))
cmap= cmap=plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_min+degs_crop_left,lon_max-degs_crop_right,lat_min+degs_crop_bottom,lat_max-degs_crop_top))
clevs = np.linspace(min_contour, max_contour,9)
cont = iplt.contourf(pcubediff, clevs, cmap=cmap, extend='both')
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
cbar = plt.colorbar(cont, orientation='horizontal', pad=0.05, extend='both', format = '%d')
#cbar.set_label('')
cbar.set_label(pcube.units, fontsize=10, color='#262626')
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['%d' % i for i in ticks])
main_title='%s - Difference' % pcube.standard_name.title().replace('_',' ')
model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
model_info = re.sub(r'[(\']', ' ', model_info)
model_info = re.sub(r'[\',)]', ' ', model_info)
print model_info
if not os.path.exists('%s%s/%s' % (save_path, experiment_id, pp_file)): os.makedirs('%s%s/%s' % (save_path, experiment_id, pp_file))
plt.savefig('%s%s/%s/%s_%s_notitle_diff_8km.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
#plt.show()
plt.savefig('%s%s/%s/%s_%s_diff_8km.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
| mit |
jm-begon/scikit-learn | examples/svm/plot_custom_kernel.py | 115 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(x, y):
"""
We create a custom kernel:
                 (2  0)
    k(x, y) = x  (    ) y.T
                 (0  1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(x, M), y.T)
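# Written out for two sample vectors, the kernel above is simply
#     k(x, y) = 2 * x[0] * y[0] + x[1] * y[1],
# i.e. an ordinary dot product in which the first feature (sepal length for
# this iris subset) is weighted twice as heavily as the second.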
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
sinhrks/pandas-ml | pandas_ml/smaccessors/test/test_datasets.py | 1 | 7696 | #!/usr/bin/env python
import pandas as pd
import statsmodels.api as sm
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestStatsModelsDatasets(tm.TestCase):
load_method = 'load'
def test_anes96(self):
data = getattr(sm.datasets.anes96, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (944, 6))
self.assertEqual(df.target_name, 'PID')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_cancer(self):
data = getattr(sm.datasets.cancer, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (301, 2))
self.assertEqual(df.target_name, 'cancer')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_ccard(self):
data = getattr(sm.datasets.ccard, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (72, 5))
self.assertEqual(df.target_name, 'AVGEXP')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_committee(self):
data = getattr(sm.datasets.committee, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (20, 6))
self.assertEqual(df.target_name, 'BILLS104')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_copper(self):
data = getattr(sm.datasets.copper, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (25, 6))
self.assertEqual(df.target_name, 'WORLDCONSUMPTION')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_cpunish(self):
data = getattr(sm.datasets.cpunish, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (17, 7))
self.assertEqual(df.target_name, 'EXECUTIONS')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_elnino(self):
data = getattr(sm.datasets.elnino, self.load_method)()
msg = "Unable to read statsmodels Dataset without exog"
with self.assertRaisesRegexp(ValueError, msg):
pdml.ModelFrame(data)
def test_engel(self):
data = getattr(sm.datasets.engel, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (235, 2))
self.assertEqual(df.target_name, 'income')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_grunfeld(self):
data = getattr(sm.datasets.grunfeld, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (220, 5))
self.assertEqual(df.target_name, 'invest')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_longley(self):
data = getattr(sm.datasets.longley, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (16, 7))
self.assertEqual(df.target_name, 'TOTEMP')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_macrodata(self):
data = getattr(sm.datasets.macrodata, self.load_method)()
msg = "Unable to read statsmodels Dataset without exog"
with self.assertRaisesRegexp(ValueError, msg):
pdml.ModelFrame(data)
def test_modechoice(self):
data = getattr(sm.datasets.modechoice, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (840, 7))
self.assertEqual(df.target_name, 'choice')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_nile(self):
data = getattr(sm.datasets.nile, self.load_method)()
msg = "Unable to read statsmodels Dataset without exog"
with self.assertRaisesRegexp(ValueError, msg):
pdml.ModelFrame(data)
def test_randhie(self):
data = getattr(sm.datasets.randhie, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (20190, 10))
self.assertEqual(df.target_name, 'mdvis')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_scotland(self):
data = getattr(sm.datasets.scotland, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (32, 8))
self.assertEqual(df.target_name, 'YES')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_spector(self):
data = getattr(sm.datasets.spector, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (32, 4))
self.assertEqual(df.target_name, 'GRADE')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_stackloss(self):
data = getattr(sm.datasets.stackloss, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (21, 4))
self.assertEqual(df.target_name, 'STACKLOSS')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_star98(self):
data = getattr(sm.datasets.star98, self.load_method)()
msg = 'Data must be 1-dimensional'
with self.assertRaisesRegexp(Exception, msg):
pdml.ModelFrame(data)
def test_strikes(self):
data = getattr(sm.datasets.strikes, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (62, 2))
self.assertEqual(df.target_name, 'duration')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_sunspots(self):
data = getattr(sm.datasets.sunspots, self.load_method)()
msg = "Unable to read statsmodels Dataset without exog"
with self.assertRaisesRegexp(ValueError, msg):
pdml.ModelFrame(data)
def test_fair(self):
data = getattr(sm.datasets.fair, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (6366, 9))
self.assertEqual(df.target_name, 'affairs')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_heart(self):
data = getattr(sm.datasets.heart, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (69, 3))
self.assertEqual(df.target_name, 'survival')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_statecrime(self):
data = getattr(sm.datasets.statecrime, self.load_method)()
df = pdml.ModelFrame(data)
self.assertEqual(df.shape, (51, 5))
self.assertEqual(df.target_name, 'murder')
tm.assert_index_equal(df.data.columns, pd.Index(data.exog_name))
def test_co2(self):
data = getattr(sm.datasets.co2, self.load_method)()
msg = "Unable to read statsmodels Dataset without exog"
with self.assertRaisesRegexp(ValueError, msg):
pdml.ModelFrame(data)
class TestStatsModelsDatasets_LoadPandas(TestStatsModelsDatasets):
load_method = 'load_pandas'
def test_star98(self):
data = sm.datasets.star98.load_pandas()
msg = 'cannot copy sequence with size 2 to array axis with dimension 303'
with self.assertRaisesRegexp(Exception, msg):
pdml.ModelFrame(data)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| bsd-3-clause |
QISKit/qiskit-sdk-py | qiskit/tools/jupyter/backend_monitor.py | 1 | 19040 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""A module for monitoring backends."""
import types
import math
import datetime
from IPython.display import display # pylint: disable=import-error
import matplotlib.pyplot as plt # pylint: disable=import-error
from matplotlib.patches import Circle # pylint: disable=import-error
import ipywidgets as widgets # pylint: disable=import-error
from qiskit.exceptions import QiskitError
from qiskit.visualization.gate_map import plot_gate_map, plot_error_map
try:
# pylint: disable=import-error
from qiskit.providers.ibmq import IBMQBackend
except ImportError:
pass
MONTH_NAMES = {1: 'Jan.',
2: 'Feb.',
3: 'Mar.',
4: 'Apr.',
5: 'May',
6: 'June',
7: 'July',
8: 'Aug.',
9: 'Sept.',
10: 'Oct.',
11: 'Nov.',
12: 'Dec.'
}
def _load_jobs_data(self, change):
"""Loads backend jobs data
"""
if change['new'] == 4 and not self._did_jobs:
self._did_jobs = True
year = widgets.Output(layout=widgets.Layout(display='flex-inline',
align_items='center',
min_height='400px'))
month = widgets.Output(layout=widgets.Layout(display='flex-inline',
align_items='center',
min_height='400px'))
week = widgets.Output(layout=widgets.Layout(display='flex-inline',
align_items='center',
min_height='400px'))
self.children[4].children = [year, month, week]
self.children[4].set_title(0, 'Year')
self.children[4].set_title(1, 'Month')
self.children[4].set_title(2, 'Week')
self.children[4].selected_index = 1
_build_job_history(self.children[4], self._backend)
def _backend_monitor(backend):
""" A private function to generate a monitor widget
for an IBMQ backend repr.
Args:
backend (IBMQBackend): The backend.
Raises:
QiskitError: Input is not an IBMQBackend
"""
if not isinstance(backend, IBMQBackend):
raise QiskitError('Input variable is not of type IBMQBackend.')
title_style = "style='color:#ffffff;background-color:#000000;padding-top: 1%;"
title_style += "padding-bottom: 1%;padding-left: 1%; margin-top: 0px'"
title_html = "<h1 {style}>{name}</h1>".format(
style=title_style, name=backend.name())
details = [config_tab(backend)]
tab_contents = ['Configuration']
# Empty jobs tab widget
jobs = widgets.Tab(layout=widgets.Layout(max_height='620px'))
if not backend.configuration().simulator:
tab_contents.extend(['Qubit Properties', 'Multi-Qubit Gates',
'Error Map', 'Job History'])
details.extend([qubits_tab(backend), gates_tab(backend),
detailed_map(backend), jobs])
tabs = widgets.Tab(layout=widgets.Layout(overflow_y='scroll'))
tabs.children = details
for i in range(len(details)):
tabs.set_title(i, tab_contents[i])
# Make backend accessible to tabs widget
tabs._backend = backend # pylint: disable=attribute-defined-outside-init
tabs._did_jobs = False
# pylint: disable=attribute-defined-outside-init
tabs._update = types.MethodType(_load_jobs_data, tabs)
tabs.observe(tabs._update, names='selected_index')
title_widget = widgets.HTML(value=title_html,
layout=widgets.Layout(margin='0px 0px 0px 0px'))
bmonitor = widgets.VBox([title_widget, tabs],
layout=widgets.Layout(border='4px solid #000000',
max_height='650px', min_height='650px',
overflow_y='hidden'))
display(bmonitor)
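# Hedged usage sketch (illustrative only; the account call and backend name below are
# assumptions, not part of this module):
#     from qiskit import IBMQ
#     provider = IBMQ.load_account()
#     _backend_monitor(provider.get_backend('ibmq_vigo'))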
def config_tab(backend):
"""The backend configuration widget.
Args:
backend (IBMQBackend): The backend.
Returns:
grid: A GridBox widget.
"""
status = backend.status().to_dict()
config = backend.configuration().to_dict()
config_dict = {**status, **config}
upper_list = ['n_qubits', 'operational',
'status_msg', 'pending_jobs',
'backend_version', 'basis_gates',
'max_shots', 'max_experiments']
lower_list = list(set(config_dict.keys()).difference(upper_list))
# Remove gates because they are in a different tab
lower_list.remove('gates')
# Look for hamiltonian
if 'hamiltonian' in lower_list:
htex = config_dict['hamiltonian']['h_latex']
config_dict['hamiltonian'] = "$$%s$$" % htex
upper_str = "<table>"
upper_str += """<style>
table {
border-collapse: collapse;
width: auto;
}
th, td {
text-align: left;
padding: 8px;
}
tr:nth-child(even) {background-color: #f6f6f6;}
</style>"""
footer = "</table>"
# Upper HBox widget data
upper_str += "<tr><th>Property</th><th>Value</th></tr>"
for key in upper_list:
upper_str += "<tr><td><font style='font-weight:bold'>%s</font></td><td>%s</td></tr>" % (
key, config_dict[key])
upper_str += footer
upper_table = widgets.HTMLMath(
value=upper_str, layout=widgets.Layout(width='100%', grid_area='left'))
image_widget = widgets.Output(
layout=widgets.Layout(display='flex-inline', grid_area='right',
padding='10px 10px 10px 10px',
width='auto', max_height='325px',
align_items='center'))
if not config['simulator']:
with image_widget:
gate_map = plot_gate_map(backend)
display(gate_map)
plt.close(gate_map)
lower_str = "<table>"
lower_str += """<style>
table {
border-collapse: collapse;
width: auto;
}
th, td {
text-align: left;
padding: 8px;
}
tr:nth-child(even) {background-color: #f6f6f6;}
</style>"""
lower_str += "<tr><th></th><th></th></tr>"
for key in lower_list:
if key != 'name':
lower_str += "<tr><td>%s</td><td>%s</td></tr>" % (
key, config_dict[key])
lower_str += footer
lower_table = widgets.HTMLMath(value=lower_str,
layout=widgets.Layout(
width='auto',
grid_area='bottom'))
grid = widgets.GridBox(children=[upper_table, image_widget, lower_table],
layout=widgets.Layout(
grid_template_rows='auto auto',
grid_template_columns='31% 23% 23% 23%',
grid_template_areas='''
"left right right right"
"bottom bottom bottom bottom"
''',
grid_gap='0px 0px'))
return grid
def qubits_tab(backend):
"""The qubits properties widget
Args:
backend (IBMQBackend): The backend.
Returns:
VBox: A VBox widget.
"""
props = backend.properties().to_dict()
header_html = "<div><font style='font-weight:bold'>{key}</font>: {value}</div>"
header_html = header_html.format(key='last_update_date',
value=props['last_update_date'])
update_date_widget = widgets.HTML(value=header_html)
qubit_html = "<table>"
qubit_html += """<style>
table {
border-collapse: collapse;
width: auto;
}
th, td {
text-align: left;
padding: 8px;
}
tr:nth-child(even) {background-color: #f6f6f6;}
</style>"""
qubit_html += "<tr><th></th><th>Frequency</th><th>T1</th><th>T2</th>"
qubit_html += "<th>U1 gate error</th><th>U2 gate error</th><th>U3 gate error</th>"
qubit_html += "<th>Readout error</th></tr>"
qubit_footer = "</table>"
for qub in range(len(props['qubits'])):
name = 'Q%s' % qub
qubit_data = props['qubits'][qub]
gate_data = [g for g in props['gates'] if g['qubits'] == [qub]]
t1_info = qubit_data[0]
t2_info = qubit_data[1]
freq_info = qubit_data[2]
readout_info = qubit_data[3]
freq = str(round(freq_info['value'], 5))+' '+freq_info['unit']
T1 = str(round(t1_info['value'],
5))+' ' + t1_info['unit']
T2 = str(round(t2_info['value'],
5))+' ' + t2_info['unit']
for gd in gate_data:
if gd['gate'] == 'u1':
U1 = str(round(gd['parameters'][0]['value'], 5))
break
for gd in gate_data:
if gd['gate'] == 'u2':
U2 = str(round(gd['parameters'][0]['value'], 5))
break
for gd in gate_data:
if gd['gate'] == 'u3':
U3 = str(round(gd['parameters'][0]['value'], 5))
break
readout_error = round(readout_info['value'], 5)
qubit_html += "<tr><td><font style='font-weight:bold'>%s</font></td><td>%s</td>"
qubit_html += "<td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>"
qubit_html = qubit_html % (name, freq, T1, T2, U1, U2, U3, readout_error)
qubit_html += qubit_footer
qubit_widget = widgets.HTML(value=qubit_html)
out = widgets.VBox([update_date_widget,
qubit_widget])
return out
def gates_tab(backend):
"""The multiple qubit gate error widget.
Args:
backend (IBMQBackend): The backend.
Returns:
VBox: A VBox widget.
"""
props = backend.properties().to_dict()
multi_qubit_gates = [g for g in props['gates'] if len(g['qubits']) > 1]
header_html = "<div><font style='font-weight:bold'>{key}</font>: {value}</div>"
header_html = header_html.format(key='last_update_date',
value=props['last_update_date'])
update_date_widget = widgets.HTML(value=header_html,
layout=widgets.Layout(grid_area='top'))
gate_html = "<table>"
gate_html += """<style>
table {
border-collapse: collapse;
width: auto;
}
th, td {
text-align: left;
padding: 8px;
}
tr:nth-child(even) {background-color: #f6f6f6;};
</style>"""
gate_html += "<tr><th></th><th>Type</th><th>Gate error</th></tr>"
gate_footer = "</table>"
# Split gates into two columns
left_num = math.ceil(len(multi_qubit_gates)/3)
mid_num = math.ceil((len(multi_qubit_gates)-left_num)/2)
left_table = gate_html
for qub in range(left_num):
gate = multi_qubit_gates[qub]
qubits = gate['qubits']
ttype = gate['gate']
error = round(gate['parameters'][0]['value'], 5)
left_table += "<tr><td><font style='font-weight:bold'>%s</font>"
left_table += "</td><td>%s</td><td>%s</td></tr>"
left_table = left_table % ("{}{}_{}".format(ttype, qubits[0], qubits[1]),
ttype, error)
left_table += gate_footer
middle_table = gate_html
for qub in range(left_num, left_num+mid_num):
gate = multi_qubit_gates[qub]
qubits = gate['qubits']
ttype = gate['gate']
error = round(gate['parameters'][0]['value'], 5)
middle_table += "<tr><td><font style='font-weight:bold'>%s</font>"
middle_table += "</td><td>%s</td><td>%s</td></tr>"
middle_table = middle_table % ("{}{}_{}".format(ttype, qubits[0], qubits[1]),
ttype, error)
middle_table += gate_footer
right_table = gate_html
for qub in range(left_num+mid_num, len(multi_qubit_gates)):
gate = multi_qubit_gates[qub]
qubits = gate['qubits']
ttype = gate['gate']
error = round(gate['parameters'][0]['value'], 5)
right_table += "<tr><td><font style='font-weight:bold'>%s</font>"
right_table += "</td><td>%s</td><td>%s</td></tr>"
right_table = right_table % ("{}{}_{}".format(ttype, qubits[0], qubits[1]),
ttype, error)
right_table += gate_footer
left_table_widget = widgets.HTML(value=left_table,
layout=widgets.Layout(grid_area='left'))
middle_table_widget = widgets.HTML(value=middle_table,
layout=widgets.Layout(grid_area='middle'))
right_table_widget = widgets.HTML(value=right_table,
layout=widgets.Layout(grid_area='right'))
grid = widgets.GridBox(children=[update_date_widget,
left_table_widget,
middle_table_widget,
right_table_widget],
layout=widgets.Layout(
grid_template_rows='auto auto',
grid_template_columns='33% 33% 33%',
grid_template_areas='''
"top top top"
"left middle right"
''',
grid_gap='0px 0px'))
return grid
def detailed_map(backend):
"""Widget for displaying detailed noise map.
Args:
backend (IBMQBackend): The backend.
Returns:
GridBox: Widget holding noise map images.
"""
error_widget = widgets.Output(layout=widgets.Layout(display='flex-inline',
align_items='center'))
with error_widget:
display(plot_error_map(backend, figsize=(11, 9), show_title=False))
return error_widget
def job_history(backend):
"""Widget for displaying job history
Args:
backend (IBMQBackend): The backend.
Returns:
Tab: A tab widget for history images.
"""
year = widgets.Output(layout=widgets.Layout(display='flex-inline',
align_items='center',
min_height='400px'))
month = widgets.Output(layout=widgets.Layout(display='flex-inline',
align_items='center',
min_height='400px'))
week = widgets.Output(layout=widgets.Layout(display='flex-inline',
align_items='center',
min_height='400px'))
tabs = widgets.Tab(layout=widgets.Layout(max_height='620px'))
tabs.children = [year, month, week]
tabs.set_title(0, 'Year')
tabs.set_title(1, 'Month')
tabs.set_title(2, 'Week')
tabs.selected_index = 1
_build_job_history(tabs, backend)
return tabs
def _build_job_history(tabs, backend):
past_year_date = datetime.datetime.now() - datetime.timedelta(days=365)
date_filter = {'creationDate': {'gt': past_year_date.isoformat()}}
jobs = backend.jobs(limit=None, db_filter=date_filter)
with tabs.children[0]:
year_plot = plot_job_history(jobs, interval='year')
display(year_plot)
plt.close(year_plot)
with tabs.children[1]:
month_plot = plot_job_history(jobs, interval='month')
display(month_plot)
plt.close(month_plot)
with tabs.children[2]:
week_plot = plot_job_history(jobs, interval='week')
display(week_plot)
plt.close(week_plot)
def plot_job_history(jobs, interval='year'):
"""Plots the job history of the user from the given list of jobs.
Args:
jobs (list): A list of jobs with type IBMQJob.
interval (str): Interval over which to examine.
Returns:
fig: A Matplotlib figure instance.
"""
def get_date(job):
"""Returns a datetime object from a IBMQJob instance.
Args:
job (IBMQJob): A job.
Returns:
dt: A datetime object.
"""
return datetime.datetime.strptime(job.creation_date(),
'%Y-%m-%dT%H:%M:%S.%fZ')
current_time = datetime.datetime.now()
if interval == 'year':
bins = [(current_time - datetime.timedelta(days=k*365/12))
for k in range(12)]
elif interval == 'month':
bins = [(current_time - datetime.timedelta(days=k)) for k in range(30)]
elif interval == 'week':
bins = [(current_time - datetime.timedelta(days=k)) for k in range(7)]
binned_jobs = [0]*len(bins)
if interval == 'year':
for job in jobs:
for ind, dat in enumerate(bins):
date = get_date(job)
if date.month == dat.month:
binned_jobs[ind] += 1
break
else:
continue
else:
for job in jobs:
for ind, dat in enumerate(bins):
date = get_date(job)
if date.day == dat.day and date.month == dat.month:
binned_jobs[ind] += 1
break
else:
continue
nz_bins = []
nz_idx = []
for ind, val in enumerate(binned_jobs):
if val != 0:
nz_idx.append(ind)
nz_bins.append(val)
total_jobs = sum(binned_jobs)
colors = ['#003f5c', '#ffa600', '#374c80', '#ff764a',
'#7a5195', '#ef5675', '#bc5090']
if interval == 'year':
labels = ['{}-{}'.format(str(bins[b].year)[2:], MONTH_NAMES[bins[b].month]) for b in nz_idx]
else:
labels = ['{}-{}'.format(MONTH_NAMES[bins[b].month], bins[b].day) for b in nz_idx]
fig, ax = plt.subplots(1, 1, figsize=(5.5, 5.5)) # pylint: disable=invalid-name
ax.pie(nz_bins[::-1], labels=labels, colors=colors, textprops={'fontsize': 14},
rotatelabels=True, counterclock=False, radius=1)
ax.add_artist(Circle((0, 0), 0.7, color='white', zorder=1))
ax.text(0, 0, total_jobs, horizontalalignment='center',
verticalalignment='center', fontsize=26)
return fig
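# Hedged usage sketch (assumes `backend` is an IBMQBackend with retrievable jobs; the
# limit and output filename are illustrative):
#     jobs = backend.jobs(limit=50)
#     fig = plot_job_history(jobs, interval='month')
#     fig.savefig('job_history.png')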
| apache-2.0 |
cemoody/lda2vec | examples/twenty_newsgroups/lda2vec/lda2vec_run.py | 1 | 4411 | # Author: Chris Moody <[email protected]>
# License: MIT
# This simple example loads the newsgroups data from sklearn
# and trains an LDA-like model on it
import os
import os.path
import pickle
import time
import shelve
import chainer
from chainer import cuda
from chainer import serializers
import chainer.optimizers as O
import numpy as np
from lda2vec import utils
from lda2vec import prepare_topics, print_top_words_per_topic, topic_coherence
from lda2vec_model import LDA2Vec
gpu_id = int(os.getenv('CUDA_GPU', 0))
cuda.get_device(gpu_id).use()
print "Using GPU " + str(gpu_id)
data_dir = os.getenv('data_dir', '../data/')
fn_vocab = '{data_dir:s}/vocab.pkl'.format(data_dir=data_dir)
fn_corpus = '{data_dir:s}/corpus.pkl'.format(data_dir=data_dir)
fn_flatnd = '{data_dir:s}/flattened.npy'.format(data_dir=data_dir)
fn_docids = '{data_dir:s}/doc_ids.npy'.format(data_dir=data_dir)
fn_vectors = '{data_dir:s}/vectors.npy'.format(data_dir=data_dir)
vocab = pickle.load(open(fn_vocab, 'r'))
corpus = pickle.load(open(fn_corpus, 'r'))
flattened = np.load(fn_flatnd)
doc_ids = np.load(fn_docids)
vectors = np.load(fn_vectors)
# Model Parameters
# Number of documents
n_docs = doc_ids.max() + 1
# Number of unique words in the vocabulary
n_vocab = flattened.max() + 1
# 'Strength' of the Dirichlet prior; 200.0 seems to work well
clambda = 200.0
# Number of topics to fit
n_topics = int(os.getenv('n_topics', 20))
batchsize = 4096
# Power for neg sampling
power = float(os.getenv('power', 0.75))
# Initialize with pretrained word vectors
pretrained = bool(int(os.getenv('pretrained', True)))
# Sampling temperature
temperature = float(os.getenv('temperature', 1.0))
# Number of dimensions in a single word vector
n_units = int(os.getenv('n_units', 300))
# Get the string representation for every compact key
words = corpus.word_list(vocab)[:n_vocab]
# How many tokens are in each document
doc_idx, lengths = np.unique(doc_ids, return_counts=True)
doc_lengths = np.zeros(doc_ids.max() + 1, dtype='int32')
doc_lengths[doc_idx] = lengths
# Count all token frequencies
tok_idx, freq = np.unique(flattened, return_counts=True)
term_frequency = np.zeros(n_vocab, dtype='int32')
term_frequency[tok_idx] = freq
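# Illustrative aside: np.unique([3, 3, 7], return_counts=True) returns
# (array([3, 7]), array([2, 1])), so term_frequency[3] == 2 and term_frequency[7] == 1.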
for key in sorted(locals().keys()):
val = locals()[key]
if len(str(val)) < 100 and '<' not in str(val):
print key, val
model = LDA2Vec(n_documents=n_docs, n_document_topics=n_topics,
n_units=n_units, n_vocab=n_vocab, counts=term_frequency,
n_samples=15, power=power, temperature=temperature)
if os.path.exists('lda2vec.hdf5'):
print "Reloading from saved"
serializers.load_hdf5("lda2vec.hdf5", model)
if pretrained:
model.sampler.W.data[:, :] = vectors[:n_vocab, :]
model.to_gpu()
optimizer = O.Adam()
optimizer.setup(model)
clip = chainer.optimizer.GradientClipping(5.0)
optimizer.add_hook(clip)
j = 0
epoch = 0
fraction = batchsize * 1.0 / flattened.shape[0]
progress = shelve.open('progress.shelve')
for epoch in range(200):
data = prepare_topics(cuda.to_cpu(model.mixture.weights.W.data).copy(),
cuda.to_cpu(model.mixture.factors.W.data).copy(),
cuda.to_cpu(model.sampler.W.data).copy(),
words)
top_words = print_top_words_per_topic(data)
if j % 100 == 0 and j > 100:
coherence = topic_coherence(top_words)
for t in range(n_topics):
print t, coherence[(t, 'cv')]
kw = dict(top_words=top_words, coherence=coherence, epoch=epoch)
progress[str(epoch)] = pickle.dumps(kw)
data['doc_lengths'] = doc_lengths
data['term_frequency'] = term_frequency
np.savez('topics.pyldavis', **data)
for d, f in utils.chunks(batchsize, doc_ids, flattened):
t0 = time.time()
optimizer.zero_grads()
l = model.fit_partial(d.copy(), f.copy())
prior = model.prior()
loss = prior * fraction
loss.backward()
optimizer.update()
msg = ("J:{j:05d} E:{epoch:05d} L:{loss:1.3e} "
"P:{prior:1.3e} R:{rate:1.3e}")
prior.to_cpu()
loss.to_cpu()
t1 = time.time()
dt = t1 - t0
rate = batchsize / dt
logs = dict(loss=float(l), epoch=epoch, j=j,
prior=float(prior.data), rate=rate)
print msg.format(**logs)
j += 1
serializers.save_hdf5("lda2vec.hdf5", model)
| mit |
madjelan/CostSensitiveClassification | costcla/probcal/probcal.py | 1 | 7638 | # coding=utf-8
"""Methods to calibrate the estimated probabilities.
"""
# Authors: Alejandro Correa Bahnsen <[email protected]>
# License: BSD 3 clause
from sklearn.metrics import roc_curve
import numpy as np
#TODO: Add isotonic regression from sklearn
#TODO: Add Platt calibration
# http://ift.tt/XuMk3s
class ROCConvexHull:
"""Implementation the the calibration method ROCConvexHull
Attributes
----------
`calibration_map` : array-like
calibration map for mapping the raw probabilities to the calibrated probabilities.
See also
--------
sklearn.IsotonicRegression
References
----------
.. [1] J. Hernandez-Orallo, P. Flach, C. Ferri, 'A Unified View of Performance Metrics :
Translating Threshold Choice into Expected Classification Loss', Journal of
Machine Learning Research, 13, 2813–2869, 2012.
Examples
--------
>>> from costcla.probcal import ROCConvexHull
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.cross_validation import train_test_split
>>> from costcla.datasets import load_creditscoring1
>>> from costcla.metrics import brier_score_loss
>>> data = load_creditscoring1()
>>> sets = train_test_split(data.data, data.target, data.cost_mat, test_size=0.33, random_state=0)
>>> X_train, X_test, y_train, y_test, cost_mat_train, cost_mat_test = sets
>>> f = RandomForestClassifier()
>>> f.fit(X_train, y_train)
>>> y_prob_test = f.predict_proba(X_test)
>>> f_cal = ROCConvexHull()
>>> f_cal.fit(y_test, y_prob_test)
>>> y_prob_test_cal = f_cal.predict_proba(y_prob_test)
>>> # Brier score using only RandomForest
>>> print brier_score_loss(y_test, y_prob_test[:, 1])
0.0577615264881
>>> # Brier score using calibrated RandomForest
>>> print brier_score_loss(y_test, y_prob_test_cal)
0.0553677407894
"""
def __init__(self):
self.calibration_map = []
def fit(self, y, p):
""" Fit the calibration map
Parameters
----------
y : array-like of shape = [n_samples]
True class to be used for calibrating the probabilities
p : array-like of shape = [n_samples, 2]
Predicted probabilities to be used for calibrating the probabilities
Returns
-------
self : object
Returns self.
"""
# TODO: Check input
if p.size != p.shape[0]:
p = p[:, 1]
fpr, tpr, thresholds = roc_curve(y, p)
#works with sklearn 0.11
if fpr.min() > 0 or tpr.min() > 0:
fpr = np.hstack((0, fpr))
tpr = np.hstack((0, tpr))
thresholds = np.hstack((1.01, thresholds))
def prob_freq(y, predict_proba):
#calculate distribution and return in inverse order
proba_bins = np.unique(predict_proba)
freq_all = np.bincount(proba_bins.searchsorted(predict_proba))
freq_0_tempa = np.unique(predict_proba[np.nonzero(y == 0)[0]])
freq_0_tempb = np.bincount(freq_0_tempa.searchsorted(predict_proba[np.nonzero(y == 0)[0]]))
freq = np.zeros((proba_bins.shape[0], 3))
freq[:, 0] = proba_bins
for i in range(freq_0_tempa.shape[0]):
freq[np.nonzero(proba_bins == freq_0_tempa[i])[0], 1] = freq_0_tempb[i]
freq[:, 2] = freq_all - freq[:, 1]
freq = freq[proba_bins.argsort()[::-1], :]
pr = freq[:, 2] / freq[:, 1:].sum(axis=1)
pr = pr.reshape(freq.shape[0], 1)
#fix when no negatives in range
pr[pr == 1.0] = 0
freq = np.hstack((freq, pr))
return freq
f = prob_freq(y, p)
temp_hull = []
for i in range(fpr.shape[0]):
temp_hull.append((fpr[i], tpr[i]))
#close the plane
temp_hull.append((1, 0))
rocch_ = _convexhull(temp_hull)
rocch = np.array([(a, b) for (a, b) in rocch_[:-1]])
rocch_find = np.zeros(fpr.shape[0], dtype=np.bool)
for i in range(rocch.shape[0]):
rocch_find[np.intersect1d(np.nonzero(rocch[i, 0] == fpr)[0],
np.nonzero(rocch[i, 1] == tpr)[0])] = True
rocch_thresholds = thresholds[rocch_find]
#calibrated probabilities using ROCCH
f_cal = np.zeros((rocch_thresholds.shape[0] - 1, 5))
for i in range(rocch_thresholds.shape[0] - 1):
f_cal[i, 0] = rocch_thresholds[i]
f_cal[i, 1] = rocch_thresholds[i + 1]
join_elements = np.logical_and(f_cal[i, 1] <= f[:, 0], f_cal[i, 0] > f[:, 0])
f_cal[i, 2] = f[join_elements, 1].sum()
f_cal[i, 3] = f[join_elements, 2].sum()
f_cal[:, 4] = f_cal[:, 3] / f_cal[:, [2, 3]].sum(axis=1)
#fix to add 0
f_cal[-1, 1] = 0
calibrated_map = f_cal[:, [0, 1, 4]]
self.calibration_map = calibrated_map
def predict_proba(self, p):
""" Calculate the calibrated probabilities
Parameters
----------
p : array-like of shape = [n_samples, 2]
Predicted probabilities to be calibrated using calibration map
Returns
-------
y_prob_cal : array-like of shape = [n_samples, 1]
Predicted calibrated probabilities
"""
# TODO: Check input
if p.size != p.shape[0]:
p = p[:, 1]
calibrated_proba = np.zeros(p.shape[0])
for i in range(self.calibration_map.shape[0]):
calibrated_proba[np.logical_and(self.calibration_map[i, 1] <= p, self.calibration_map[i, 0] > p)] = \
self.calibration_map[i, 2]
# TODO: return 2D and refactor
return calibrated_proba
def _convexhull(P):
""" Private function that calculate the convex hull of a set of points
The algorithm was taken from [1].
http://code.activestate.com/recipes/66527-finding-the-convex-hull-of-a-set-of-2d-points/
References
----------
.. [1] Alex Martelli, Anna Ravenscroft, David Ascher, 'Python Cookbook', O'Reilly Media, Inc., 2005.
"""
def mydet(p, q, r):
"""Calc. determinant of a special matrix with three 2D points.
The sign, "-" or "+", determines the side, right or left,
respectively, on which the point r lies, when measured against
a directed vector from p to q.
"""
# We use Sarrus' Rule to calculate the determinant.
# (could also use the Numeric package...)
sum1 = q[0] * r[1] + p[0] * q[1] + r[0] * p[1]
sum2 = q[0] * p[1] + r[0] * q[1] + p[0] * r[1]
return sum1 - sum2
def isrightturn((p, q, r)):
"Do the vectors pq:qr form a right turn, or not?"
assert p != q and q != r and p != r
if mydet(p, q, r) < 0:
return 1
else:
return 0
# Get a local list copy of the points and sort them lexically.
points = map(None, P)
points.sort()
# Build upper half of the hull.
upper = [points[0], points[1]]
for p in points[2:]:
upper.append(p)
while len(upper) > 2 and not isrightturn(upper[-3:]):
del upper[-2]
# Build lower half of the hull.
points.reverse()
lower = [points[0], points[1]]
for p in points[2:]:
lower.append(p)
while len(lower) > 2 and not isrightturn(lower[-3:]):
del lower[-2]
# Remove duplicates.
del lower[0]
del lower[-1]
# Concatenate both halfs and return.
return tuple(upper + lower)
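# Illustrative check (hedged sketch, not part of the original module): for a unit square
# plus one interior point, the interior point should be dropped and only the four
# corners returned (ordered along the upper and then lower hull):
#     _convexhull([(0, 0), (1, 0), (1, 1), (0, 1), (0.5, 0.5)])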
| bsd-3-clause |
kadrlica/obztak | obztak/delve.py | 1 | 35300 | #!/usr/bin/env python
"""
Code related to the DECam Dwarf Galaxy Survey.
"""
import os,sys
import logging
import copy
from collections import OrderedDict as odict
import pandas as pd
import numpy as np
import fitsio
from obztak.field import FieldArray, SISPI_DICT, SEP
from obztak.survey import Survey
from obztak.scheduler import Scheduler
from obztak.tactician import Tactician
from obztak.utils.projector import cel2gal, angsep
from obztak.utils import constants
from obztak.utils import fileio
from obztak.utils.constants import BANDS,SMASH_POLE,CCD_X,CCD_Y,STANDARDS,DECAM
from obztak.utils.constants import COLORS, CMAPS
from obztak.utils.date import datestring, setdefaults, nite2utc,utc2nite,datestr
NAME = 'DELVE'
PROGRAM = NAME.lower()
PROPID = '2019A-0305'
PROPOSER = 'Drlica-Wagner'
BANDS = ['g','r','i']
TILINGS = [1,2,3,4]
DONE = -1
TEFF_MIN_WIDE = pd.DataFrame(dict(FILTER=['g','i'],TEFF=[0.4,0.5]))
TEFF_MIN_MC = pd.DataFrame(dict(FILTER=['g','r','i'],TEFF=[0.3,0.3,0.45]))
# Seeing limits for DELVE survey components
FWHM_DEEP = 0.9 # arcsec
FWHM_MC = 1.1 # arcsec
class DelveSurvey(Survey):
""" Survey sublcass for BLISS. """
# 2019A SCHEDULED
nights_2019A = [
['2019/02/07', 'second'], # phase=%, set=
['2019/02/08', 'second'], # phase=%, set=
['2019/02/09', 'second'], # phase=%, set=
['2019/02/12', 'full '], # phase=%, set=
['2019/02/13', 'full '], # phase=%, set=
['2019/02/14', 'second'], # phase=%, set=
['2019/02/15', 'full '], # phase=%, set=
['2019/02/24', 'second'], # phase=%, set=
['2019/02/25', 'second'], # phase=%, set=
['2019/02/26', 'second'], # phase=%, set=
['2019/02/27', 'second'], # phase=%, set=
['2019/02/28', 'second'], # phase=%, set=
['2019/03/01', 'second'], # phase=%, set=
['2019/05/12', 'full '], # phase=%, set=
['2019/05/13', 'full '], # phase=%, set=
['2019/05/28', 'second'], # phase=%, set=
['2019/05/29', 'second'], # phase=%, set=
['2019/05/30', 'second'], # phase=%, set=
['2019/05/31', 'second'], # phase=%, set=
['2019/06/01', 'second'], # phase=%, set=
['2019/06/02', 'second'], # phase=%, set=
['2019/06/03', 'second'], # phase=%, set=
['2019/06/04', 'second'], # phase=%, set=
['2019/06/05', 'full '], # phase=%, set=
['2019/06/06', 'full '], # phase=%, set=
['2019/06/07', 'full '], # phase=%, set=
['2019/06/08', 'full '], # phase=%, set=
['2019/06/09', 'full '], # phase=%, set=
['2019/06/23', 'second'], # phase=%, set=
['2019/06/24', 'second'], # phase=%, set=
['2019/06/25', 'second'], # phase=%, set=
['2019/06/26', 'second'], # phase=%, set=
['2019/06/27', 'second'], # phase=%, set=
['2019/06/28', 'second'], # phase=%, set=
]
nights_2019B = [
['2019/08/07', 'first'], # phase=%, set=
['2019/08/08', 'full'], # phase=%, set=
['2019/08/09', 'full'], # phase=%, set=
['2019/08/10', 'full'], # phase=%, set=
['2019/08/20', 'first'], # phase=%, set=
['2019/08/21', 'first'], # phase=%, set=
['2019/08/30', 'first'], # phase=%, set=
['2019/09/06', 'first'], # phase=%, set=
['2019/09/07', 'first'], # phase=%, set=
['2019/09/08', 'first'], # phase=%, set=
['2019/09/09', 'first'], # phase=%, set=
['2019/10/07', 'first'], # phase=%, set=
['2019/10/08', 'first'], # phase=%, set=
['2019/10/09', 'first'], # phase=%, set=
['2019/11/04', 'second'], # phase=%, set=
['2019/11/05', 'second'], # phase=%, set=
['2019/11/06', 'second'], # phase=%, set=
['2019/11/16', 'second'], # phase=%, set=
['2019/11/17', 'second'], # phase=%, set=
['2019/11/18', 'second'], # phase=%, set=
['2019/12/01', 'second'], # phase=%, set=
['2019/12/02', 'second'], # phase=%, set=
['2019/12/03', 'second'], # phase=%, set=
['2019/12/04', 'second'], # phase=%, set=
['2019/12/05', 'second'], # phase=%, set=
['2019/12/06', 'second'], # phase=%, set=
['2019/12/07', 'second'], # phase=%, set=
['2019/12/20', 'second'], # phase=%, set=
['2019/12/21', 'full'], # phase=%, set=
['2019/12/22', 'first'], # phase=%, set=
['2019/12/23', 'full'], # phase=%, set=
['2020/01/20', 'second'], # phase=%, set=
['2020/01/21', 'second'], # phase=%, set=
['2020/01/27', 'second'], # phase=%, set=
['2020/01/28', 'second'], # phase=%, set=
['2020/01/29', 'second'], # phase=%, set=
['2020/01/30', 'second'], # phase=%, set=
['2020/01/31', 'second'], # phase=%, set=
]
# 2020A SCHEDULED
nights_2020A = [
['2020/02/05','full' ],
['2020/02/06','full' ],
['2020/02/07','full' ],
['2020/02/11','second'],
['2020/02/12','full' ],
['2020/03/01','first' ],
['2020/03/02','first' ],
['2020/03/05','first' ],
['2020/03/06','first' ],
['2020/03/15','first' ],
#['2020/03/28','first' ],
#['2020/03/29','first' ],
#['2020/03/30','first' ],
#['2020/03/31','first' ],
#['2020/04/01','first' ],
#['2020/04/04','first' ],
#['2020/04/05','first' ],
#['2020/04/07','first' ],
#['2020/04/17','first' ],
#['2020/04/18','first' ],
#['2020/04/19','first' ],
#['2020/04/20','first' ],
#['2020/05/01','first' ],
#['2020/05/02','first' ],
#['2020/05/03','first' ],
#['2020/05/04','first' ],
#['2020/05/05','first' ],
#['2020/05/06','first' ],
#['2020/05/21','first' ],
#['2020/05/31','full' ],
#['2020/06/01','full' ],
#['2020/06/29','second'],
#['2020/06/30','second'],
#['2020/07/23','second'],
#['2020/07/27','second'],
#['2020/07/28','second'],
#['2020/07/29','second'],
#['2020/07/31','second'],
]
nights_2020B = [
['2020/10/24','first' ],
['2020/10/25','full' ],
['2020/11/04','full' ],
['2020/11/05','full' ],
['2020/11/06','full' ],
['2020/11/18','full' ],
['2020/11/19','full' ],
['2020/11/20','full' ],
['2020/11/21','full' ],
['2020/11/24','full' ],
['2020/11/25','full' ],
['2020/11/26','second'],
['2020/12/05','full' ],
['2020/12/20','full' ],
['2021/01/02','first' ],
['2021/01/03','first' ],
['2021/01/04','first' ],
['2021/01/06','second'],
['2021/01/12','full' ],
['2021/01/15','full' ],
['2021/01/16','full' ],
['2021/01/21','first' ],
['2021/01/22','first' ],
['2021/01/23','first' ],
]
extra_nights = []
nights = nights_2019A + nights_2019B + nights_2020A + nights_2020B + extra_nights
def prepare_fields(self, infile=None, outfile=None, plot=True, **kwargs):
""" Create the list of fields to be targeted by this survey.
Parameters:
-----------
infile : File containing all possible field locations.
outfile: Output file of selected fields
plot : Create an output plot of selected fields.
Returns:
--------
fields : A FieldArray of the selected fields.
"""
if infile is None:
infile = fileio.get_datafile('decam-tiles-bliss-v1.fits.gz')
#infile = fileio.get_datafile('decam-tiles-smash-v1.fits.gz')
#infile = fileio.get_datafile('decam-tiles-decals-v1.fits.gz')
logging.info("Reading tiles from: %s"%os.path.basename(infile))
data = fitsio.read(infile)
deep_fields = self.create_deep_fields(data)
mc_fields = self.create_mc_fields(data)
wide_fields = self.create_wide_fields(data)
fields = wide_fields + mc_fields + deep_fields
# blacklist
blacklist = ['5716-01-g','5716-01-i'] # flame nebula
fields['PRIORITY'][np.in1d(fields.unique_id,blacklist)] = DONE
if plot:
import pylab as plt
import skymap.survey
plt.ion()
sel = fields['PRIORITY'] > 0
plt.figure()
smap = skymap.survey.MaglitesSkymap()
smap.draw_fields(fields[sel],alpha=0.3,edgecolor='none')
smap.draw_des(c='r')
smap.draw_milky_way()
smap.draw_smash()
plt.figure()
smap = skymap.survey.SurveyMcBryde()
smap.draw_fields(fields[sel],alpha=0.3,edgecolor='none')
smap.draw_des(c='r')
smap.draw_milky_way()
smap.draw_smash()
if outfile:
plt.savefig(os.path.splitext(outfile)[0]+'.png',bbox_inches='tight')
if not sys.flags.interactive:
plt.show(block=True)
if outfile:
print("Writing %s..."%outfile)
fields.write(outfile)
return fields
@classmethod
def update_covered_fields(cls, fields):
""" Update the priority of covered fields. """
fields = copy.deepcopy(fields)
frac, depth = cls.covered(fields)
done = (fields['PRIORITY'] == DONE)
print("Found %i exposures already done."%done.sum())
wide = np.char.endswith(fields['PROGRAM'],'-wide')
teff_min_wide = pd.DataFrame(fields).merge(TEFF_MIN_WIDE,on='FILTER',how='left').to_records()['TEFF']
covered_wide = depth > teff_min_wide*fields['TILING']*fields['EXPTIME']
done_wide = wide & covered_wide
print('Found %i WIDE exposures newly done.'%(done_wide & ~done).sum())
mc = np.char.endswith(fields['PROGRAM'],'-mc')
teff_min_mc = pd.DataFrame(fields).merge(TEFF_MIN_MC,on='FILTER',how='left').to_records()['TEFF']
covered_mc = depth > teff_min_mc*fields['TILING']*fields['EXPTIME']
done_mc = mc & covered_mc
print('Found %i MC exposures newly done.'%(done_mc & ~done).sum())
fields['PRIORITY'][done_wide & ~done] = DONE
fields['PRIORITY'][done_mc & ~done] = DONE
return fields
def create_wide_fields(self, data, plot=False):
""" Create the wide field observations """
logging.info("Creating DEEP fields...")
BANDS = ['g','i']
EXPTIME = [90,90]
TILINGS = [4,4]
TEFF_MIN = TEFF_MIN_WIDE
nhexes = len(np.unique(data['TILEID']))
nbands = len(BANDS)
nfields = len(data)*nbands
logging.info(" Number of hexes: %d"%nhexes)
logging.info(" Filters: %s"%BANDS)
logging.info(" Exposure time: %s"%EXPTIME)
logging.info(" Tilings: %s"%TILINGS)
fields = FieldArray(nfields)
fields['PROGRAM'] = PROGRAM+'-wide'
fields['HEX'] = np.repeat(data['TILEID'],nbands)
fields['TILING'] = np.repeat(data['PASS'],nbands)
fields['RA'] = np.repeat(data['RA'],nbands)
fields['DEC'] = np.repeat(data['DEC'],nbands)
fields['FILTER'] = np.tile(BANDS,len(data))
fields['EXPTIME'] = np.tile(EXPTIME,len(data))
fields['PRIORITY'] = fields['TILING']
sel = self.footprintWIDE(fields['RA'],fields['DEC'])
sel &= (~self.footprintMilkyWay(fields['RA'],fields['DEC']))
sel &= (~self.footprintDES(fields['RA'],fields['DEC']))
sel &= (~self.footprintSMASH(fields['RA'],fields['DEC'],angsep=0.75*DECAM))
sel &= (~self.footprintMC(fields['RA'],fields['DEC']))
# Avoid DEEP fields? No.
#sel &= (~self.footprintDEEP(fields['RA'],fields['DEC']))
fields = fields[sel]
frac, depth = self.covered(fields)
teffmin = pd.DataFrame(fields).merge(TEFF_MIN,on='FILTER').to_records()['TEFF']
fields['PRIORITY'][depth > teffmin*fields['TILING']*fields['EXPTIME']] = DONE
# Avoid MagLiteS-II for now
#fields['PRIORITY'][self.footprintMaglites2(fields['RA'],fields['DEC'])] = DONE
if plot: self.plot_depth(fields,depth,'delve-wide-%s-gt%i.png')
logging.info("Number of target fields: %d"%len(fields))
outfile = 'delve-wide-fields.fits.fz'
logging.info("Writing %s..."%outfile)
fields.write(outfile,clobber=True)
return fields
def create_mc_fields(self, data, plot=False):
""" Select fields around the LMC """
logging.info("Creating MC fields...")
BANDS = ['g','r','i']
EXPTIME = [267,267,333]
TILINGS = [4, 4, 4]
TEFF_MIN = TEFF_MIN_MC
nhexes = len(np.unique(data['TILEID']))
nbands = len(BANDS)
nfields = len(data)*nbands
logging.info(" Number of hexes: %d"%nhexes)
logging.info(" Filters: %s"%BANDS)
logging.info(" Exposure time: %s"%EXPTIME)
logging.info(" Tilings: %s"%TILINGS)
fields = FieldArray(nfields)
fields['PROGRAM'] = PROGRAM+'-mc'
fields['HEX'] = np.repeat(data['TILEID'],nbands)
fields['TILING'] = np.repeat(data['PASS'],nbands)
fields['RA'] = np.repeat(data['RA'],nbands)
fields['DEC'] = np.repeat(data['DEC'],nbands)
fields['FILTER'] = np.tile(BANDS,len(data))
fields['EXPTIME'] =np.tile(EXPTIME,len(data))
fields['PRIORITY'] = fields['TILING']
sel = self.footprintMC(fields['RA'],fields['DEC'])
sel &= (~self.footprintDES(fields['RA'],fields['DEC']))
sel &= (~self.footprintSMASH(fields['RA'],fields['DEC'],angsep=0.75*DECAM))
sel &= (~self.footprintMilkyWay(fields['RA'],fields['DEC']))
fields = fields[sel]
frac, depth = self.covered(fields)
teffmin = pd.DataFrame(fields).merge(TEFF_MIN,on='FILTER').to_records()['TEFF']
fields['PRIORITY'][depth > teffmin*fields['TILING']*fields['EXPTIME']] = DONE
# Avoid MagLiteS-II for now
fields['PRIORITY'][self.footprintMaglites2(fields['RA'],fields['DEC'])] = DONE
if plot: self.plot_depth(fields,depth,'delve-mc-%s-gt%i.png',proj='maglites')
logging.info("Number of target fields: %d"%len(fields))
outfile = 'delve-mc-fields.fits.fz'
logging.info("Writing %s..."%outfile)
fields.write(outfile,clobber=True)
return fields
def create_deep_fields(self, data, plot=False):
logging.info("Creating DEEP fields...")
BANDS = ['g','i']
EXPTIME = [300,300]
TILINGS = [15,10]
dirname = '/Users/kadrlica/delve/observing/data'
hexbase = 100000 # hex offset
# target number and filename
basenames = odict([
('SextansB', (000, 'sextansB_fields_v3.txt')),
('IC5152', (100, 'ic5152_fields_v3.txt')),
('NGC300', (200, 'ngc300_fields_v3.txt')),
('NGC55', (300, 'ngc55_fields_v3.txt')),
])
fields = FieldArray()
for name,(num,basename) in basenames.items():
filename = os.path.join(dirname,basename)
target = np.genfromtxt(filename,names=True,dtype=None)
f = FieldArray(len(target))
filters = np.tile(BANDS,len(f))
exptimes = np.tile(EXPTIME,len(f))
f['RA'] = target['ra']
f['DEC'] = target['dec']
f['HEX'] = hexbase + num + target['field']
f['TILING'] = target['tiling']
#f['PRIORITY'] = num + target['priority']
f['PRIORITY'] = target['priority']
f['PROGRAM'] = PROGRAM+'-deep'
# Group the fields by hex/tiling
f = np.repeat(f,len(BANDS))
f['FILTER'] = filters
f['EXPTIME'] = exptimes
for (b,t) in zip(BANDS, TILINGS):
f = f[~((f['FILTER'] == b) & (f['TILING'] > t))]
# Not doing these deep fields
if num in [000,100,200]:
f['PRIORITY'] *= DONE
fields = fields + f
exclude = [100001, 100002, 100003, 100004, 100007, 100008, 100012,
100013, 100016, 100017, 100018, 100019]
fields = fields[~np.in1d(fields['HEX'],exclude)]
nhexes = len(np.unique(fields['HEX']))
logging.info(" Number of hexes: %d"%nhexes)
logging.info(" Filters: %s"%BANDS)
logging.info(" Exposure time: %s"%EXPTIME)
if plot:
frac, depth = self.covered(fields)
self.plot_depth(fields,depth,'delve-deep-%s-gt%i.png')
logging.info("Number of target fields: %d"%len(fields))
outfile = 'delve-deep-fields.fits.fz'
logging.info("Writing %s..."%outfile)
fields.write(outfile,clobber=True)
return fields
def create_deep_fields2(self, data, plot=False):
""" DEPRECATED: Create the deep field observations """
logging.info("Creating DEEP fields...")
BANDS = ['g','i']
EXPTIME = [300,300]
TILINGS = [15,10]
d = data[data['PASS'] == 1]
sel = self.footprintDEEP(d['RA'],d['DEC'])
data = np.copy(d[sel])
nhexes = len(np.unique(data['TILEID']))
ntilings = np.sum(TILINGS)
nbands = len(BANDS)
nfields = np.sum( np.sum(sel) * np.array(TILINGS))
logging.info(" Number of hexes: %d"%nhexes)
logging.info(" Filters: %s"%BANDS)
logging.info(" Exposure time: %s"%EXPTIME)
logging.info(" Tilings: %s"%TILINGS)
tilings = np.array(range(1,TILINGS[0]+1)+range(1,TILINGS[1]+1))
filters = np.repeat(BANDS,TILINGS)
exptimes = np.repeat(EXPTIME,TILINGS)
fields = FieldArray(nfields)
fields['PROGRAM'] = PROGRAM+'-deep'
fields['HEX'] = np.repeat(data['TILEID'],ntilings)
fields['RA'] = np.repeat(data['RA'],ntilings)
fields['DEC'] = np.repeat(data['DEC'],ntilings)
fields['EXPTIME'] = np.tile(exptimes,nhexes)
fields['TILING'] = np.tile(tilings,nhexes)
fields['FILTER'] = np.tile(filters,nhexes)
fields['PRIORITY'] = fields['TILING']
frac, depth = self.covered(fields)
fields['PRIORITY'][depth > fields['TILING']*fields['EXPTIME']] = DONE
if plot: self.plot_depth(fields,depth,'delve-deep-%s-gt%i.png')
logging.info("Number of target fields: %d"%len(fields))
outfile = 'delve-deep-fields.fits.fz'
logging.info("Writing %s..."%outfile)
fields.write(outfile,clobber=True)
return fields
@staticmethod
def footprintDEEP(ra,dec):
""" Selecting exposures around the deep drilling fields """
ra,dec = np.copy(ra), np.copy(dec)
sel = np.zeros(len(ra),dtype=bool)
filename = fileio.get_datafile('LV_MC_analogs_DECam.txt')
targets = np.genfromtxt(filename,names=True,dtype=None)
for t in targets:
sel |= (angsep(t['RA'],t['Dec'],ra,dec) < t['r_vir'])
return sel
@staticmethod
def footprintMC(ra,dec):
""" Selecting exposures around the Magellanic Clouds """
ra,dec = np.copy(ra), np.copy(dec)
sel = angsep(constants.RA_LMC,constants.DEC_LMC,ra,dec) < 25.0
sel |= angsep(constants.RA_SMC,constants.DEC_SMC,ra,dec) < 15.0
return sel
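# Rough illustration (hedged; the exact LMC/SMC centers come from obztak.utils.constants):
#     DelveSurvey.footprintMC(np.array([81.0]), np.array([-69.8]))   # near the LMC -> array([ True])
#     DelveSurvey.footprintMC(np.array([150.0]), np.array([10.0]))   # far from both -> array([False])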
@staticmethod
def footprintWIDE(ra,dec):
""" Selecting wide-field exposures plane """
ra,dec = np.copy(ra), np.copy(dec)
sel = (dec < 0)
return sel
@staticmethod
def footprintMaglites2(ra,dec):
from obztak.maglites2 import Maglites2Survey
return Maglites2Survey.footprint(ra,dec)
@staticmethod
def covered(fields, percent=67.):
"""
Determine which fields haven't been previously covered by DECam
Parameters:
-----------
fields : field information
percent: fraction of the field that is covered
Returns:
--------
sel, frac : selection of fields and coverage fraction
"""
import healpy as hp
# These maps are SUM(teff * exptime)
#dirname = '/Users/kadrlica/delve/observing/data'
dirname = '/Users/kadrlica/delve/observing/v2/maps-20201024'
basename = 'decam_sum_expmap_%s_n1024.fits.gz'
sel = np.ones(len(fields),dtype=bool)
frac = np.zeros(len(fields),dtype=float)
depth = np.zeros(len(fields),dtype=float)
ra,dec,band=fields['RA'],fields['DEC'],fields['FILTER']
for b in np.unique(band):
idx = (band==b)
filename = os.path.join(dirname,basename%b)
logging.info("Reading %s..."%os.path.basename(filename))
skymap = hp.read_map(filename,verbose=False)
nside = hp.get_nside(skymap)
vec = hp.ang2vec(np.radians(90.-dec[idx]),np.radians(ra[idx]))
f,d = [],[]
for i,v in enumerate(vec):
print '\r%s/%s'%(i+1,len(vec)),
sys.stdout.flush()
pix = hp.query_disc(nside,v,np.radians(constants.DECAM))
# Find the 33rd percentile sum of effective exposure time
# i.e., 67% of the pixels have a larger SUM(teff * exptime)
d.append( np.percentile(skymap[pix],100-percent))
# Find the detection fraction
f.append((skymap[pix] > d[-1]).sum()/float(len(pix)))
print
frac[idx] = np.array(f)
depth[idx] = np.array(d)
return frac,depth
def plot_depth(self, fields, depth, outbase, proj='mcbryde', **kwargs):
import skymap, skymap.survey
import pylab as plt
bands = np.unique(fields['FILTER'])
ra,dec = fields['RA'],fields['DEC']
for b in bands:
sel = fields['FILTER']==b
for d in np.unique(fields[sel]['EXPTIME']*fields[sel]['TILING'])[:-1]:
plt.figure()
if proj == 'mcbryde': smap = skymap.McBrydeSkymap()
elif proj == 'maglites': smap = skymap.survey.MaglitesSkymap()
smap.scatter(*smap(ra[sel],dec[sel]),c=depth[sel],vmax=d,edgecolor='none',s=3)
smap.draw_lmc(fc='none')
smap.draw_smc(fc='none')
plt.colorbar()
plt.savefig(outbase%(b,d),bbox_inches='tight')
plt.close()
class DelveFieldArray(FieldArray):
PROGRAM = PROGRAM
PROPID = PROPID
PROPOSER = PROPOSER
SISPI_DICT = copy.deepcopy(SISPI_DICT)
SISPI_DICT["program"] = PROGRAM
SISPI_DICT["propid"] = PROPID
SISPI_DICT["proposer"] = PROPOSER
OBJECT_FMT = NAME.upper() + ' field'+SEP+' %s'
SEQID_FMT = NAME.upper() + ' scheduled'+SEP+' %(DATE)s'
BANDS = BANDS
@classmethod
def query(cls, **kwargs):
""" Generate the database query.
Parameters:
-----------
kwargs : Keyword arguments to fill the query.
Returns:
--------
query : The query string.
"""
defaults = dict(propid=cls.SISPI_DICT['propid'], limit='',
object_fmt = cls.OBJECT_FMT%'')
kwargs = setdefaults(kwargs,copy.deepcopy(defaults))
query ="""
SELECT object, seqid, seqnum, telra as RA, teldec as dec,
expTime, filter,
--to_char(to_timestamp(utc_beg), 'YYYY/MM/DD HH24:MI:SS.MS') AS DATE,
to_char(date, 'YYYY/MM/DD HH24:MI:SS.MS') AS DATE,
COALESCE(airmass,-1) as AIRMASS, COALESCE(moonangl,-1) as MOONANGLE,
COALESCE(ha, -1) as HOURANGLE, COALESCE(slewangl,-1) as SLEW, PROGRAM
--FROM exposure where propid = '%(propid)s' and exptime > 89
--2019B-1014: Felipe Olivares
FROM exposure where propid in ('%(propid)s','2019B-1014') and exptime > 89
and discard = False and delivered = True and flavor = 'object'
and object like '%(object_fmt)s%%'
-- and id NOT IN (860597, 860598, 860599, 860600, 860601, 860602)
and (
COALESCE(qc_teff,-1) NOT BETWEEN 0 and 0.1
OR to_timestamp(utc_beg) > (now() - interval '14 hours')
)
ORDER BY utc_beg %(limit)s
"""%kwargs
return query
class DelveScheduler(Scheduler):
_defaults = odict(Scheduler._defaults.items() + [
('tactician','coverage'),
('windows',fileio.get_datafile("delve-windows-v3.csv.gz")),
('targets',fileio.get_datafile("delve-target-fields-v15.csv.gz")),
])
FieldType = DelveFieldArray
class DelveTactician(Tactician):
CONDITIONS = odict([
(None, [1.0, 2.0]),
('wide', [1.0, 1.4]),
('deep', [1.0, 1.4]),
('mc', [1.0, 2.0]),
('mc_good', [1.8, 2.0]),
('mc_ok', [1.4, 1.8]),
('mc_poor', [1.0, 1.7]),
('gw', [1.0, 2.0]),
])
def __init__(self, *args, **kwargs):
super(DelveTactician,self).__init__(*args,**kwargs)
#Default to mode 'wide' if no mode in kwargs
self.mode = kwargs.get('mode','wide')
@property
def viable_fields(self):
viable = super(DelveTactician,self).viable_fields
viable &= (self.fields['PRIORITY'] >= 0)
return viable
def skybright_select(self):
"""Select fields based on skybrightness and band.
Parameters:
-----------
None
Returns:
--------
sel : boolean selection
"""
sel = np.ones(len(self.fields),dtype=bool)
if (self.sun.alt > -0.28):
# i-band if Sun altitude > -16 deg
sel &= (np.char.count('i',self.fields['FILTER']) > 0)
# Moon band constraints (alt = 0.175 rad = 10 deg)
elif (self.moon.phase >= 60) and (self.moon.alt > 0.175):
# Moon is very bright; only do i
sel &= (np.char.count('i',self.fields['FILTER']) > 0)
# Allow i,z but prefer z
#sel &= (np.char.count('iz',self.fields['FILTER']) > 0)
#weight += 1e2 * (np.char.count('i',self.fields['FILTER']) > 0)
#elif (self.moon.phase >= 45) and (self.moon.alt > 0.175):
elif (self.moon.phase >= 40) and (self.moon.alt > 0.0):
# Moon is more than half full; do r,i
sel &= (np.char.count('ri',self.fields['FILTER']) > 0)
else:
# Moon is faint or down; do g,r (unless none available)
sel &= (np.char.count('gr',self.fields['FILTER']) > 0)
#weight += 1e8 * (np.char.count('iz',self.fields['FILTER']) > 0)
return sel
@property
def weight(self):
if self.mode is None:
# First priority is deep
weights = self.weight_deep()
if self.fwhm < FWHM_DEEP and np.isfinite(weights).sum():
logging.info("DEEP")
return weights
# Then mc
weights = self.weight_mc()
if self.fwhm < FWHM_MC and np.isfinite(weights).sum():
logging.info("MC")
return weights
# Then wide
weights = self.weight_wide()
if np.isfinite(weights).sum():
logging.info("WIDE")
return weights
elif self.mode == 'deep':
return self.weight_deep()
elif self.mode == 'mc':
return self.weight_mc()
elif self.mode == 'wide':
return self.weight_wide()
elif self.mode == 'gw':
return self.weight_gw()
else:
raise ValueError("Unrecognized mode: %s"%self.mode)
raise ValueError("No viable fields")
def weight_deep(self):
""" Calculate the field weight for the WIDE survey. """
airmass = self.airmass
moon_angle = self.moon_angle
sel = self.viable_fields
sel &= (self.fields['PROGRAM'] == 'delve-deep')
weight = np.zeros(len(sel))
# Moon angle constraints
moon_limit = 30. # + (self.moon.phase/5.)
sel &= (moon_angle > moon_limit)
# Sky brightness selection
sel &= self.skybright_select()
# Airmass cut
airmass_min, airmass_max = self.CONDITIONS['deep']
sel &= ((airmass > airmass_min) & (airmass < airmass_max))
## Try hard to do high priority fields
weight += 1e2 * self.fields['PRIORITY']
# Set infinite weight to all disallowed fields
weight[~sel] = np.inf
return weight
def weight_mc(self):
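""" Calculate the field weight for the MC (Magellanic Clouds) fields. """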
airmass = self.airmass
moon_angle = self.moon_angle
sel = self.viable_fields
sel &= (self.fields['PROGRAM'] == 'delve-mc')
weight = np.zeros(len(sel))
# Moon angle constraints
moon_limit = 30.
sel &= (moon_angle > moon_limit)
# Airmass restrictions
if self.fwhm < 0.9:
airmass_min, airmass_max = self.CONDITIONS['mc_good']
elif self.fwhm < 1.0:
airmass_min, airmass_max = self.CONDITIONS['mc_ok']
else:
airmass_min, airmass_max = self.CONDITIONS['mc_poor']
sel &= ((airmass > airmass_min) & (airmass < airmass_max))
# Sky brightness selection
sel &= self.skybright_select()
# Only a single tiling
#sel &= (self.fields['PRIORITY'] == 3)
# Get fields before they set
#weight += 2.0 * self.hour_angle
# Prioritize fields
weight += 3. * 360. * self.fields['PRIORITY']
weight += 1e4 * (self.fields['TILING'] > 3)
# Slew weighting
weight += self.slew**3
# Try hard to do the same field
weight += 1e5 * (self.slew != 0)
# Higher weight for higher airmass
# airmass = 1.4 -> weight = 6.4
weight += 500. * (airmass - 1.)**3
# Set infinite weight to all disallowed fields
weight[~sel] = np.inf
return weight
def weight_wide(self):
""" Calculate the field weight for the WIDE survey. """
airmass = self.airmass
moon_angle = self.moon_angle
sel = self.viable_fields
sel &= (self.fields['PROGRAM'] == 'delve-wide')
weight = np.zeros(len(sel))
# Sky brightness selection
sel &= self.skybright_select()
# Airmass cut
airmass_min, airmass_max = self.CONDITIONS['wide']
sel &= ((airmass > airmass_min) & (airmass < airmass_max))
# Higher weight for fields close to the moon (when up)
# angle = 50 -> weight = 6.4
# Moon angle constraints (viable fields sets moon_angle > 20.)
if (self.moon.alt > -0.04) and (self.moon.phase >= 10):
#moon_limit = np.min(20 + self.moon.phase/2., 40)
moon_limit = 40. + (self.moon.phase/10.)
sel &= (moon_angle > moon_limit)
#weight += 100 * (35./moon_angle)**3
#weight += 10 * (35./moon_angle)**3
weight += 1 * (35./moon_angle)**3
# Higher weight for rising fields (higher hour angle)
# HA [min,max] = [-53,54] (for airmass 1.4)
#weight += 5.0 * self.hour_angle
weight += 1.0 * self.hour_angle
#weight += 0.1 * self.hour_angle
# Higher weight for larger slews
# slew = 10 deg -> weight = 1e2
weight += self.slew**2
#weight += self.slew
#weight += 1e3 * self.slew
# Higher weight for higher airmass
# airmass = 1.4 -> weight = 6.4
weight += 100. * (airmass - 1.)**3
#weight += 1e3 * (airmass - 1.)**2
# Hack priority near edge of DES S82 (doesn't really work)
#x = (self.fields['RA'] >= 45) & (self.fields['RA'] <= 100) \
# & (self.fields['DEC'] >= -20)
#self.fields['PRIORITY'][x] = np.minimum(self.fields['PRIORITY'][x],1)
## Try hard to do high priority fields
weight += 1e3 * (self.fields['PRIORITY'] - 1)
weight += 1e4 * (self.fields['TILING'] > 3)
# Set infinite weight to all disallowed fields
weight[~sel] = np.inf
return weight
def weight_gw(self):
""" Calculate the field weight for the WIDE survey. """
import healpy as hp
airmass = self.airmass
moon_angle = self.moon_angle
# Reset the exposure time
self.fields['EXPTIME'] = 90
if hasattr(self.fields,'hpx'):
hpx = self.fields.hpx
else:
hpx = hp.ang2pix(32,self.fields['RA'],self.fields['DEC'],lonlat=True)
setattr(self.fields,'hpx',hpx)
gwpix = np.genfromtxt(fileio.get_datafile('GW150914_hpixels_32.tab'))
#sel = self.viable_fields
sel = np.in1d(hpx,gwpix)
sel &= self.fields['FILTER'] == 'g'
weight = np.zeros(len(sel))
# Sky brightness selection
# Airmass cut
airmass_min, airmass_max = self.CONDITIONS['gw']
sel &= ((airmass > airmass_min) & (airmass < airmass_max))
"""
# Higher weight for fields close to the moon (when up)
# angle = 50 -> weight = 6.4
# Moon angle constraints (viable fields sets moon_angle > 20.)
if (self.moon.alt > -0.04) and (self.moon.phase >= 10):
#moon_limit = np.min(20 + self.moon.phase/2., 40)
moon_limit = 40
sel &= (moon_angle > moon_limit)
#weight += 100 * (35./moon_angle)**3
#weight += 10 * (35./moon_angle)**3
weight += 1 * (35./moon_angle)**3
"""
# Higher weight for rising fields (higher hour angle)
# HA [min,max] = [-53,54] (for airmass 1.4)
#weight += 5.0 * self.hour_angle
#weight += 1.0 * self.hour_angle
#weight += 0.1 * self.hour_angle
# Higher weight for larger slews
# slew = 10 deg -> weight = 1e2
weight += self.slew**2
#weight += self.slew
#weight += 1e3 * self.slew
# Higher weight for higher airmass
# airmass = 1.4 -> weight = 6.4
weight += 1e3 * (airmass - 1.)**3
#weight += 1e3 * (airmass - 1.)**2
## Try hard to do high priority fields
#weight += 1e3 * (self.fields['PRIORITY'] - 1)
#weight += 1e4 * (self.fields['TILING'] > 3)
# Set infinite weight to all disallowed fields
weight[~sel] = np.inf
return weight
def select_index(self):
weight = self.weight
index = np.array([np.argmin(weight)],dtype=int)
if np.any(~np.isfinite(weight[index])):
plot = (logging.getLogger().getEffectiveLevel()==logging.DEBUG)
msg = "Infinite weight selected..."
logging.warn(msg)
logging.info(">>> To plot fields enter 'plot=True'")
logging.info(">>> Enter 'c' to continue")
import pdb; pdb.set_trace()
if plot:
import obztak.utils.ortho, pylab as plt
airmass = self.CONDITIONS[self.mode][1]
bmap = obztak.utils.ortho.plotFields(self.completed_fields[-1],self.fields,self.completed_fields,options_basemap=dict(airmass=airmass))
logging.info(">>> Enter 'c' to continue")
pdb.set_trace()
raise ValueError(msg)
return index
| mit |
satishgoda/bokeh | examples/app/stock_applet/stock_app_simple.py | 43 | 12408 | """
This file demonstrates a bokeh applet, which can either be viewed
directly on a bokeh-server, or embedded into a flask application.
See the README.md file in this directory for instructions on running.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
from os import listdir
from os.path import dirname, join, splitext
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource, Plot
from bokeh.plotting import figure, curdoc
from bokeh.properties import String, Instance
from bokeh.server.app import bokeh_app
from bokeh.server.utils.plugins import object_page
from bokeh.models.widgets import (HBox, VBox, VBoxForm, PreText,
Select, AppHBox, AppVBox, AppVBoxForm)
from bokeh.simpleapp import simpleapp
select1 = Select(name='ticker1', value='AAPL', options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO'])
select2 = Select(name='ticker2', value='GOOG', options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO'])
@simpleapp(select1, select2)
def stock(ticker1, ticker2):
pretext = PreText(text="", width=500)
df = get_data(ticker1, ticker2)
source = ColumnDataSource(data=df)
source.tags = ['main_source']
p = figure(
title="%s vs %s" % (ticker1, ticker2),
plot_width=400, plot_height=400,
tools="pan,wheel_zoom,box_select,reset",
title_text_font_size="10pt",
)
p.circle(ticker1 + "_returns", ticker2 + "_returns",
size=2,
nonselection_alpha=0.02,
source=source
)
stats = df.describe()
pretext.text = str(stats)
row1 = HBox(children=[p, pretext])
hist1 = hist_plot(df, ticker1)
hist2 = hist_plot(df, ticker2)
row2 = HBox(children=[hist1, hist2])
line1 = line_plot(ticker1, source)
line2 = line_plot(ticker2, source, line1.x_range)
output = VBox(children=[row1, row2, line1, line2])
return output
stock.route("/bokeh/stocks/")
@simpleapp(select1, select2)
def stock2(ticker1, ticker2):
pretext = PreText(text="", width=500)
df = get_data(ticker1, ticker2)
source = ColumnDataSource(data=df)
source.tags = ['main_source']
p = figure(
title="%s vs %s" % (ticker1, ticker2),
plot_width=400, plot_height=400,
tools="pan,wheel_zoom,box_select,reset",
title_text_font_size="10pt",
)
p.circle(ticker1 + "_returns", ticker2 + "_returns",
size=2,
nonselection_alpha=0.02,
source=source
)
stats = df.describe()
pretext.text = str(stats)
hist1 = hist_plot(df, ticker1)
hist2 = hist_plot(df, ticker2)
line1 = line_plot(ticker1, source)
line2 = line_plot(ticker2, source, line1.x_range)
return dict(scatterplot=p,
statstext=pretext,
hist1=hist1,
hist2=hist2,
line1=line1,
line2=line2)
@stock2.layout
def stock2_layout(app):
widgets = AppVBoxForm(app=app, children=['ticker1', 'ticker2'])
row1 = AppHBox(app=app, children=['scatterplot', 'statstext'])
row2 = AppHBox(app=app, children=['hist1', 'hist2'])
all_plots = AppVBox(app=app, children=[row1, row2, 'line1', 'line2'])
app = AppHBox(app=app, children=[widgets, all_plots])
return app
@stock2.update(['ticker1', 'ticker2'])
def stock2_update_input(ticker1, ticker2, app):
return stock2(ticker1, ticker2)
@stock2.update([({'tags' : 'main_source'}, ['selected'])])
def stock2_update_selection(ticker1, ticker2, app):
source = app.select_one({'tags' : 'main_source'})
df = get_data(ticker1, ticker2)
if source.selected:
selected_df = df.iloc[source.selected['1d']['indices'], :]
else:
selected_df = df
stats_text = app.objects['statstext']
stats_text.text = str(selected_df.describe())
return {
'hist1': hist_plot(df, ticker1, selected_df=selected_df),
'hist2': hist_plot(df, ticker2, selected_df=selected_df),
'statstext': stats_text,
}
stock2.route("/bokeh/stocks2/")
def hist_plot(df, ticker, selected_df=None):
if selected_df is None:
selected_df = df
global_hist, global_bins = np.histogram(df[ticker + "_returns"], bins=50)
hist, bins = np.histogram(selected_df[ticker + "_returns"], bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
start = global_bins.min()
end = global_bins.max()
top = hist.max()
p = figure(
title="%s hist" % ticker,
plot_width=500, plot_height=200,
tools="",
title_text_font_size="10pt",
x_range=[start, end],
y_range=[0, top],
)
p.rect(center, hist / 2.0, width, hist)
return p
def line_plot(ticker, source, x_range=None):
p = figure(
title=ticker,
x_range=x_range,
x_axis_type='datetime',
plot_width=1000, plot_height=200,
title_text_font_size="10pt",
tools="pan,wheel_zoom,box_select,reset"
)
p.circle(
'date', ticker,
size=2,
source=source,
nonselection_alpha=0.02
)
return p
# build up list of stock data in the daily folder
data_dir = join(dirname(__file__), "daily")
try:
tickers = listdir(data_dir)
except OSError as e:
print('Stock data not available, see README for download instructions.')
raise e
tickers = [splitext(x)[0].split("table_")[-1] for x in tickers]
# cache stock data as dict of pandas DataFrames
pd_cache = {}
def get_ticker_data(ticker):
fname = join(data_dir, "table_%s.csv" % ticker.lower())
data = pd.read_csv(
fname,
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'],
header=False,
parse_dates=['date']
)
data = data.set_index('date')
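    # note: the "_returns" column below holds absolute day-over-day differences
    # (data.c.diff()), not percentage returns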
data = pd.DataFrame({ticker: data.c, ticker + "_returns": data.c.diff()})
return data
def get_data(ticker1, ticker2):
if pd_cache.get((ticker1, ticker2)) is not None:
return pd_cache.get((ticker1, ticker2))
# only append columns if it is the same ticker
if ticker1 != ticker2:
data1 = get_ticker_data(ticker1)
data2 = get_ticker_data(ticker2)
data = pd.concat([data1, data2], axis=1)
else:
data = get_ticker_data(ticker1)
data = data.dropna()
pd_cache[(ticker1, ticker2)] = data
return data
# class StockApp(VBox):
# extra_generated_classes = [["StockApp", "StockApp", "VBox"]]
# jsmodel = "VBox"
# # text statistics
# pretext = Instance(PreText)
# # plots
# plot = Instance(Plot)
# line_plot1 = Instance(Plot)
# line_plot2 = Instance(Plot)
# hist1 = Instance(Plot)
# hist2 = Instance(Plot)
# # data source
# source = Instance(ColumnDataSource)
# # layout boxes
# mainrow = Instance(HBox)
# histrow = Instance(HBox)
# statsbox = Instance(VBox)
# # inputs
# ticker1 = String(default="AAPL")
# ticker2 = String(default="GOOG")
# ticker1_select = Instance(Select)
# ticker2_select = Instance(Select)
# input_box = Instance(VBoxForm)
# def __init__(self, *args, **kwargs):
# super(StockApp, self).__init__(*args, **kwargs)
# self._dfs = {}
# @classmethod
# def create(cls):
# """
# This function is called once, and is responsible for
# creating all objects (plots, datasources, etc)
# """
# # create layout widgets
# obj = cls()
# # create input widgets
# obj.make_inputs()
# # outputs
# obj.pretext = PreText(text="", width=500)
# obj.make_source()
# obj.make_plots()
# obj.make_stats()
# # layout
# obj.set_children()
# return obj
# def make_inputs(self):
# self.ticker1_select = Select(
# name='ticker1',
# value='AAPL',
# options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
# )
# self.ticker2_select = Select(
# name='ticker2',
# value='GOOG',
# options=['AAPL', 'GOOG', 'INTC', 'BRCM', 'YHOO']
# )
# @property
# def selected_df(self):
# pandas_df = self.df
# selected = self.source.selected
# if selected:
# pandas_df = pandas_df.iloc[selected, :]
# return pandas_df
# def make_source(self):
# self.source = ColumnDataSource(data=self.df)
# def line_plot(self, ticker, x_range=None):
# p = figure(
# title=ticker,
# x_range=x_range,
# x_axis_type='datetime',
# plot_width=1000, plot_height=200,
# title_text_font_size="10pt",
# tools="pan,wheel_zoom,box_select,reset"
# )
# p.circle(
# 'date', ticker,
# size=2,
# source=self.source,
# nonselection_alpha=0.02
# )
# return p
# def hist_plot(self, ticker):
# global_hist, global_bins = np.histogram(self.df[ticker + "_returns"], bins=50)
# hist, bins = np.histogram(self.selected_df[ticker + "_returns"], bins=50)
# width = 0.7 * (bins[1] - bins[0])
# center = (bins[:-1] + bins[1:]) / 2
# start = global_bins.min()
# end = global_bins.max()
# top = hist.max()
# p = figure(
# title="%s hist" % ticker,
# plot_width=500, plot_height=200,
# tools="",
# title_text_font_size="10pt",
# x_range=[start, end],
# y_range=[0, top],
# )
# p.rect(center, hist / 2.0, width, hist)
# return p
# def make_plots(self):
# ticker1 = self.ticker1
# ticker2 = self.ticker2
# p = figure(
# title="%s vs %s" % (ticker1, ticker2),
# plot_width=400, plot_height=400,
# tools="pan,wheel_zoom,box_select,reset",
# title_text_font_size="10pt",
# )
# p.circle(ticker1 + "_returns", ticker2 + "_returns",
# size=2,
# nonselection_alpha=0.02,
# source=self.source
# )
# self.plot = p
# self.line_plot1 = self.line_plot(ticker1)
# self.line_plot2 = self.line_plot(ticker2, self.line_plot1.x_range)
# self.hist_plots()
# def hist_plots(self):
# ticker1 = self.ticker1
# ticker2 = self.ticker2
# self.hist1 = self.hist_plot(ticker1)
# self.hist2 = self.hist_plot(ticker2)
# def set_children(self):
# self.children = [self.mainrow, self.histrow, self.line_plot1, self.line_plot2]
# self.mainrow.children = [self.input_box, self.plot, self.statsbox]
# self.input_box.children = [self.ticker1_select, self.ticker2_select]
# self.histrow.children = [self.hist1, self.hist2]
# self.statsbox.children = [self.pretext]
# def input_change(self, obj, attrname, old, new):
# if obj == self.ticker2_select:
# self.ticker2 = new
# if obj == self.ticker1_select:
# self.ticker1 = new
# self.make_source()
# self.make_plots()
# self.set_children()
# curdoc().add(self)
# def setup_events(self):
# super(StockApp, self).setup_events()
# if self.source:
# self.source.on_change('selected', self, 'selection_change')
# if self.ticker1_select:
# self.ticker1_select.on_change('value', self, 'input_change')
# if self.ticker2_select:
# self.ticker2_select.on_change('value', self, 'input_change')
# def make_stats(self):
# stats = self.selected_df.describe()
# self.pretext.text = str(stats)
# def selection_change(self, obj, attrname, old, new):
# self.make_stats()
# self.hist_plots()
# self.set_children()
# curdoc().add(self)
# @property
# def df(self):
# return get_data(self.ticker1, self.ticker2)
# # The following code adds a "/bokeh/stocks/" url to the bokeh-server. This URL
# # will render this StockApp. If you don't want serve this applet from a Bokeh
# # server (for instance if you are embedding in a separate Flask application),
# # then just remove this block of code.
# @bokeh_app.route("/bokeh/stocks/")
# @object_page("stocks")
# def make_object():
# app = StockApp.create()
# return app
| bsd-3-clause |
clemkoa/scikit-learn | benchmarks/bench_plot_svd.py | 72 | 2914 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
import six
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(six.iteritems(results))):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
potash/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 16 | 3082 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import warnings
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import clean_warning_registry
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_array_equal(chi2.get_support(indices=True), [0])
assert_array_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float64)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_array_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_array_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_array_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chi2_unused_feature():
# Unused feature should evaluate to NaN
# and should issue no runtime warning
clean_warning_registry()
with warnings.catch_warnings(record=True) as warned:
warnings.simplefilter('always')
chi, p = chi2([[1, 0], [0, 0]], [1, 0])
for w in warned:
if 'divide by zero' in w.message:
raise AssertionError('Found unexpected warning %s' % w)
assert_array_equal(chi, [1, np.nan])
assert_array_equal(p[1], np.nan)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
ProkopHapala/SimpleSimulationEngine | python/pyMolecular/eFF.py | 1 | 5595 |
import numpy as np
from ctypes import c_int, c_double, c_bool, c_float, c_char_p, c_bool, c_void_p, c_char_p
import ctypes
import os
import sys
sys.path.append('../')
from pyMeta import cpp_utils
c_double_p = ctypes.POINTER(c_double)
c_int_p = ctypes.POINTER(c_int)
def _np_as(arr,atype):
if arr is None:
return None
else:
return arr.ctypes.data_as(atype)
cpp_utils.s_numpy_data_as_call = "_np_as(%s,%s)"
# ===== To generate Interfaces automatically from headers call:
header_strings = [
"void init_buffers(){",
"bool load_xyz( const char* fname ){",
"void init( int na, int ne ){",
"void eval(){",
"void info(){",
"double* getEnergyPointer(){",
"int* getDimPointer (){",
"double* getBuff(const char* name){",
"void setBuff(const char* name, double* buff){",
"int* getIBuff(const char* name){",
"void setIBuff(const char* name, int* buff){",
"void setPauliModel(int i){",
"void setKPauli( double KPauli ){",
]
#cpp_utils.writeFuncInterfaces( header_strings ); exit() # uncomment this to re-generate C-python interfaces
#libSDL = ctypes.CDLL( "/usr/lib/x86_64-linux-gnu/libSDL2.so", ctypes.RTLD_GLOBAL )
#libGL = ctypes.CDLL( "/usr/lib/x86_64-linux-gnu/libGL.so", ctypes.RTLD_GLOBAL )
#cpp_name='CombatModels'
#cpp_utils.make(cpp_name)
#LIB_PATH = os.path.dirname( os.path.realpath(__file__) )
#LIB_PATH_CPP = os.path.normpath(LIB_PATH+'../../../'+'/cpp/Build/libs/'+cpp_name )
#lib = ctypes.CDLL( LIB_PATH_CPP+("/lib%s.so" %cpp_name) )
cpp_utils.BUILD_PATH = os.path.normpath( cpp_utils.PACKAGE_PATH + '../../../cpp/Build/libs/Molecular' )
lib = cpp_utils.loadLib('eFF_lib')
array1ui = np.ctypeslib.ndpointer(dtype=np.uint32, ndim=1, flags='CONTIGUOUS')
array1i = np.ctypeslib.ndpointer(dtype=np.int32, ndim=1, flags='CONTIGUOUS')
array2i = np.ctypeslib.ndpointer(dtype=np.int32, ndim=2, flags='CONTIGUOUS')
array1d = np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS')
array2d = np.ctypeslib.ndpointer(dtype=np.double, ndim=2, flags='CONTIGUOUS')
array3d = np.ctypeslib.ndpointer(dtype=np.double, ndim=3, flags='CONTIGUOUS')
# ========= C functions
# void init_buffers(){
lib.init_buffers.argtypes = []
lib.init_buffers.restype = None
def init_buffers():
return lib.init_buffers()
# void load_xyz( const char* fname ){
lib.load_xyz.argtypes = [c_char_p]
lib.load_xyz.restype = c_bool
def load_xyz(fname):
return lib.load_xyz(fname)
#return lib.load_xyz(_np_as(fname,c_char_p))
# void init( int na, int ne ){
lib.init.argtypes = [c_int, c_int]
lib.init.restype = None
def init(na, ne):
return lib.init(na, ne)
# void eval(){
lib.eval.argtypes = []
lib.eval.restype = c_double
def eval():
return lib.eval()
#void evalFuncDerivs( int n, double* r, double* s, double* Es, double* Fs ){
lib.evalFuncDerivs.argtypes = [ c_int, array1d, array1d, array1d, array1d ]
lib.evalFuncDerivs.restype = None
def evalFuncDerivs( r, s, Es=None, Fs=None ):
r = r + s*0
s = s + r*0
n = len(r)
if Es is None: Es=np.zeros(n)
if Fs is None: Fs=np.zeros(n)
lib.evalFuncDerivs( n, r, s, Es, Fs )
return Es,Fs
# void info(){
lib.info.argtypes = []
lib.info.restype = None
def info():
return lib.info()
# double* getEnergyPointer(){
lib.getEnergyPointer.argtypes = []
lib.getEnergyPointer.restype = c_double_p
def getEnergyTerms( sh=(7,) ):
# Ek=0, Eee EeePaul EeeExch Eae EaePaul Eaa
ptr = lib.getEnergyPointer()
return np.ctypeslib.as_array( ptr, shape=sh )
#int* getDimPointer (){
lib.getDimPointer.argtypes = []
lib.getDimPointer.restype = c_int_p
def getDimPointer( sh=(3,) ):
# ne=0 na=0 nDOFs=0
ptr = lib.getDimPointer()
return np.ctypeslib.as_array( ptr, shape=sh )
# double* getBuff(const char* name){
lib.getBuff.argtypes = [c_char_p]
lib.getBuff.restype = c_double_p
def getBuff( name, sh ):
ptr = lib.getBuff(name)
if not isinstance(sh, tuple): sh=(sh,)
#sh_ = (natom,)
#if sh is not None:
# sh_ = sh_ + sh
print "DEBUG type( ptr ) ", type( ptr ), sh
return np.ctypeslib.as_array( ptr, shape=sh)
# void setBuff(const char* name, double* buff){
lib.setBuff.argtypes = [c_char_p, c_double_p]
lib.setBuff.restype = None
def setBuff(name, buff):
return lib.setBuff(name, _np_as(buff,c_double_p))
#return lib.setBuff(_np_as(name,c_char_p), _np_as(buff,c_double_p))
# int* getIBuff(const char* name){
lib.getIBuff.argtypes = [c_char_p]
lib.getIBuff.restype = c_int_p
def getIBuff(name,sh):
ptr = lib.getIBuff(name)
if not isinstance(sh, tuple): sh=(sh,)
return np.ctypeslib.as_array( ptr, shape=sh)
#return lib.getIBuff(_np_as(name,c_char_p))
# void setIBuff(const char* name, int* buff){
lib.setIBuff.argtypes = [c_char_p, c_int_p]
lib.setIBuff.restype = None
def setIBuff(name, buff):
return lib.setIBuff(name, _np_as(buff,c_int_p))
#return lib.setIBuff(_np_as(name,c_char_p), _np_as(buff,c_int_p))
# void setPauliModel(int i){
lib.setPauliModel.argtypes = [c_int]
lib.setPauliModel.restype = None
def setPauliModel(i):
return lib.setPauliModel(i)
# void setKPauli( double KPauli ){
lib.setKPauli.argtypes = [c_double]
lib.setKPauli.restype = None
def setKPauli(KPauli):
return lib.setKPauli(KPauli)
# ========= Python Functions
if __name__ == "__main__":
import matplotlib.pyplot as plt
load_xyz("../../cpp/sketches_SDL/Molecular/data/e2_eFF.xyz")
#load_xyz("../../cpp/sketches_SDL/Molecular/data/H2O_eFF.xyz")
info()
eval()
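    # Hedged usage sketch: after eval() the exported buffers defined above can be
    # inspected; the term order follows the comment in getEnergyTerms().
    print "energy terms (Ek, Eee, EeePaul, EeeExch, Eae, EaePaul, Eaa):", getEnergyTerms()
    print "dimensions (ne, na, nDOFs):", getDimPointer()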
plt.show() | mit |
quidditymaster/piecewise_polynomial | piecewise_polynomial.py | 1 | 17338 | #Author Tim Anderton
#Created Feb 2012
"""A module for representing and fitting piecewise polynomial functions
with and without regularity constraints.
"""
import numpy as np
lna = np.linalg
poly1d = np.poly1d
import matplotlib.pyplot as plt
#Legendre = np.polynomial.legendre.Legendre
class Centered_Scaled_Polynomial:
"represents polynomials P(y) in a centered scaled coordinate y = (x-c)/s"
def __init__(self, coefficients, center = 0.0, scale = 1.0):
self.poly = poly1d(coefficients)
self.center = center
self.scale = scale
def __call__(self, xdat):
return self.poly((xdat-self.center)/self.scale)
def deriv(self):
new_coeffs = self.poly.deriv().c/self.scale #divide by scale because of chain rule
        return Centered_Scaled_Polynomial(new_coeffs, self.center, self.scale)
class Polynomial_Basis:
"a class representing a collection of polynomials"
def __init__(self, poly_coefficients, center = 0.0, scale = 1.0):
"""coefficients: a (n_basis, poly_order+1) shaped array with the polynomial
coefficients"""
        self.coefficients = poly_coefficients.transpose()
self.n_basis, order_plus_one = poly_coefficients.shape
self.order = order_plus_one - 1
self.center = center
self.scale = scale
self.basis_polys = []
for poly_idx in xrange(self.n_basis):
self.basis_polys.append(Centered_Scaled_Polynomial(self.coefficients[poly_idx], self.center, self.scale))
def evaluate_polynomial(self, basis_coefficients):
"returns a polynomial as a weighted sum of the basis polynomials"
output_poly_coeffs = np.dot(self.coefficients, basis_coefficients)
outpol = Centered_Scaled_Polynomial(output_poly_coeffs, self.center, self.scale)
return outpol
def evaluate_basis(self, xvals):
"""returns a (self.n_basis, len(xvals)) shaped array
containing the polynomials evaluated at the positions in xvals"""
xvec = np.array(xvals)
out_basis = np.zeros((self.n_basis, len(xvec)))
for basis_idx in xrange(self.n_basis):
out_basis[basis_idx] = self.basis_polys[basis_idx](xvec)
return out_basis
class Centered_Scaled_Multinomial:
def __init__(self, coefficients, powers, center, scale = 1.0):
self.coeffs = np.array(coefficients)
self.powers = np.array(powers, dtype = int)
self.max_powers = np.max(self.powers, axis = 0)
self.center = np.array(center)
self.n_coeffs, self.n_dims = self.powers.shape
if scale == 1.0:
self.scale = np.ones(self.n_dims, dtype = float)
else:
self.scale = np.array(scale)
def __add__(self, B):
new_coeffs = []
new_powers = []
powers_dict = {}
for coeff_idx in xrange(self.n_coeffs):
new_coeffs.append(self.coeffs[coeff_idx])
new_powers.append(self.powers[coeff_idx])
powers_dict[tuple(self.powers[coeff_idx])] = coeff_idx
for coeff_idx in xrange(B.n_coeffs):
cpow = tuple(B.powers[coeff_idx])
out_idx = powers_dict.get(cpow)
if out_idx != None:
new_coeffs[out_idx] += B.coeffs[coeff_idx]
else:
new_coeffs.append(B.coeffs[coeff_idx])
new_powers.append(B.powers[coeff_idx])
return Centered_Scaled_Multinomial(new_coeffs, new_powers, self.center, self.scale)
def __mul__(self, B):
new_coeffs = []
new_powers = []
powers_dict = {}
cur_out_idx = 0
for coeff_idx_1 in xrange(self.n_coeffs):
for coeff_idx_2 in xrange(B.n_coeffs):
cpow = self.powers[coeff_idx_1] + B.powers[coeff_idx_2]
tcpow = tuple(cpow)
ccoeff = self.coeffs[coeff_idx_1]*B.coeffs[coeff_idx_2]
out_idx = powers_dict.get(tcpow)
if out_idx != None:
new_coeffs[out_idx] += ccoeff
else:
powers_dict[tcpow] = cur_out_idx
new_coeffs.append(ccoeff)
new_powers.append(cpow)
cur_out_idx += 1
return Centered_Scaled_Multinomial(new_coeffs, new_powers, self.center, self.scale)
def __call__(self, x):
xpowers = [[1.0] for i in xrange(self.n_dims)]
for dim_idx in xrange(self.n_dims):
pow_num = 1
while pow_num <= self.max_powers[dim_idx]:
xpowers[dim_idx].append(xpowers[dim_idx][-1]*x[dim_idx])
pow_num += 1
result = 0
for coeff_idx in xrange(self.n_coeffs):
cpow = 1.0
for dim_idx in xrange(self.n_dims):
pow_idx = self.powers[coeff_idx, dim_idx]
cpow *= xpowers[dim_idx][pow_idx]
result += self.coeffs[coeff_idx]*cpow
return result
def multinomial_from_polynomial(poly_coeffs, center, scale, axis):
"""creates a multinomial from a 1d polynomial
poly_coeffs: the 1d polynomial coefficients highest order first
center: the multi-dimensional center M(x) is M_shift((x-center)/scale)
scale: the multi-dimensional scale
axis: the number of the dimension which the multinomial will be a function
"""
n_coeffs = len(poly_coeffs)
n_dims = len(center)
powers = np.zeros((n_coeffs, n_dims), dtype = int)
powers[:, axis] = np.arange(n_coeffs-1, -1, -1)
return Centered_Scaled_Multinomial(poly_coeffs, powers, center, scale)
class Binning:
def __init__(self, bins):
self.bins = bins
self.lb = bins[0]
self.ub = bins[-1]
self.n_bounds = len(self.bins)
self.last_bin = bins[0], bins[1]
self.last_bin_idx = 0
def get_bin_index(self, xvec):
xv = np.array(xvec)
out_idxs = np.zeros(len(xv.flat), dtype = int)
for x_idx in xrange(len(xv.flat)):
#check if the last solution still works
if self.last_bin[0] <= xvec[x_idx] <= self.last_bin[1]:
out_idxs[x_idx] = self.last_bin_idx
continue
lbi, ubi = 0, self.n_bounds-1
#import pdb; pdb.set_trace()
while True:
mididx = (lbi+ubi)/2
midbound = self.bins[mididx]
if midbound <= xvec[x_idx]:
lbi = mididx
else:
ubi = mididx
if self.bins[lbi] <= xvec[x_idx] <= self.bins[lbi+1]:
self.last_bin = self.bins[lbi], self.bins[lbi+1]
self.last_bin_idx = lbi
break
out_idxs[x_idx] = lbi
return out_idxs
class Piecewise_Polynomial:
def __init__(self, coefficients, control_points, centers = None, scales = None, bounds = (float("-inf"), float("inf")), fill_value = np.nan):
"""represents a piecewise polynomial function which transitions from one polynomial
to the next at the control points.
coefficients should be an (m, n) array
m is the number of polynomial pieces == len(control_points) + 1
n is the order of the polynomial pieces
        The function values are determined by the polynomial coefficients (highest
        order terms first), with each piece centered around the corresponding value
        in the centers array if one is passed. By default the center of a piece is
        the midpoint of its two bounding points; if one of the bounding points is
        + or - infinity, the other bounding point is taken as the "center" of that
        polynomial bin.
        Example:
        with coefficients = np.array([[3, 2], [1, 0], [-1, -1]]), control_points = [5, 6]
        and bounds = (-float('inf'), 8)
        the default centers are 5, 5.5 and 7, so the function is evaluated
        at a point x < 5 as
        y = 3*(x-5) + 2
        at a point 5 < x < 6 as
        y = 1*(x-5.5) + 0
        and at a point 6 < x < 8 as
        y = -1*(x-7) + -1
points above the upper bound of 8 will return nan
"""
self.coefficients = coefficients
self.bounds = bounds
self.control_points = control_points
n_polys, poly_order = coefficients.shape
self.poly_order = poly_order
self.ncp = len(control_points)
self.fill_value = fill_value
boundary_points = np.zeros(self.ncp+2)
boundary_points[0] = bounds[0]
boundary_points[-1] = bounds[1]
boundary_points[1:-1] = control_points
self.binning = Binning(boundary_points)
self.n_polys = n_polys
        if centers is None:
self.centers = np.zeros(n_polys)
#set the centers in such a way to allow for infinite bounds
for center_idx in range(n_polys):
lb = boundary_points[center_idx]
ub = boundary_points[center_idx+1]
if lb == float("-inf"):
lb = boundary_points[center_idx+1]
if ub == float("inf"):
ub = boundary_points[center_idx]
self.centers[center_idx] = 0.5*(lb+ub)
else:
self.centers = centers
        if scales is None:
self.scales = np.ones(n_polys)
else:
self.scales = scales
self.poly_list = []
for poly_idx in range(n_polys):
self.poly_list.append(Centered_Scaled_Polynomial(coefficients[poly_idx], self.centers[poly_idx], self.scales[poly_idx]))
def __call__(self, x_in):
output = np.zeros(x_in.shape)
poly_idxs = self.binning.get_bin_index(x_in)
output[np.isnan(poly_idxs)] = self.fill_value
for p_idx in xrange(self.n_polys):
pmask = poly_idxs == p_idx
output[pmask] = self.poly_list[p_idx](x_in[pmask])
return output
class Regularity_Constrained_Piecewise_Polynomial_Basis:
def __init__(self, poly_order, control_points, centers = None, scales = None, regularity_constraints = None, bounds = (float("-inf"), float("inf"))):
self.bounds = bounds
self.control_points = control_points
self.poly_order = poly_order
self.ncp = len(control_points)
        if regularity_constraints is None:
self.regularity_constraints = np.ones((poly_order, self.ncp), dtype = bool)
else:
self.regularity_constraints = regularity_constraints
boundary_points = np.zeros(self.ncp+2)
boundary_points[0] = bounds[0]
boundary_points[-1] = bounds[1]
boundary_points[1:-1] = control_points
self.binning = Binning(boundary_points)
n_polys = self.ncp+1
self.n_polys = n_polys
        if centers is None:
self.centers = np.zeros(n_polys)
#set the centers in such a way to allow for infinite bounds
for center_idx in range(n_polys):
lb = boundary_points[center_idx]
ub = boundary_points[center_idx+1]
if lb == float("-inf"):
lb = boundary_points[center_idx+1]
if ub == float("inf"):
ub = boundary_points[center_idx]
self.centers[center_idx] = 0.5*(lb+ub)
else:
self.centers = centers
        if scales is None:
scales = np.ones(n_polys)
self.scales = scales
poly_basis_list = [[] for i in range(n_polys)]
for poly_i in range(n_polys):
#cdomain = (self.boundary_points[poly_i], self.boundary_points[poly_i+1])
for comp_i in range(poly_order+1):
comp_vec = np.zeros((poly_order+1))
comp_vec[comp_i] = 1.0
#poly_basis_list[poly_i].append(Legendre(comp_vec, domain = cdomain))
poly_basis_list[poly_i].append(Centered_Scaled_Polynomial(comp_vec, self.centers[poly_i], self.scales[poly_i]))
#generate the constraint matrix
#nrows = self.poly_order*self.ncp
nrows = np.sum(self.regularity_constraints)
constraint_matrix = np.zeros((nrows, (self.poly_order+1)*self.n_polys))
constraint_number = 0
nco, ncp = self.regularity_constraints.shape
for control_i in range(ncp):
c_control_point = self.control_points[control_i]
l_basis = poly_basis_list[control_i] #left basis functions
r_basis = poly_basis_list[control_i+1] #right basis functions
for constraint_order in range(nco):
if not self.regularity_constraints[constraint_order, control_i]:
continue
fp_coeff_idx = control_i*(self.poly_order+1)
sp_coeff_idx = (control_i+1)*(self.poly_order+1)
#print "cp", control_i, "sp i", sp_coeff_idx
for coefficient_i in range(self.poly_order+1):
lreg_coeff = l_basis[coefficient_i](c_control_point)
rreg_coeff = r_basis[coefficient_i](c_control_point)
constraint_matrix[constraint_number, fp_coeff_idx+coefficient_i] = lreg_coeff
constraint_matrix[constraint_number, sp_coeff_idx+coefficient_i] = -rreg_coeff
#go up to the next order constraint by taking the derivative of our basis functions
constraint_number += 1
l_basis = [cpoly.deriv() for cpoly in l_basis]
r_basis = [cpoly.deriv() for cpoly in r_basis]
self.constraint_matrix = constraint_matrix
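        # The rows of v associated with the smallest singular values span the null
        # space of the constraint matrix (assuming the constraints are independent),
        # i.e. coefficient vectors that satisfy every regularity constraint; they
        # become the basis coefficients below.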
u, s, v = lna.svd(self.constraint_matrix, full_matrices=True)
self.n_basis = (self.poly_order+1)*self.n_polys-nrows
self.basis_coefficients = np.zeros((self.n_basis, self.n_polys, self.poly_order+1))
self.basis_polys = [[] for bi in range(self.n_basis)]
for basis_i in range(self.n_basis):
for poly_i in range(self.n_polys):
coeff_lb = (self.poly_order+1)*poly_i
coeff_ub = coeff_lb + self.poly_order+1
ccoeffs = v[-(basis_i+1)][coeff_lb:coeff_ub]
self.basis_coefficients[basis_i, poly_i] = ccoeffs
self.basis_polys[basis_i].append(Centered_Scaled_Polynomial(ccoeffs, self.centers[poly_i], self.scales[poly_i]))
def get_basis(self, in_vec):
xvec = np.array(in_vec)
poly_idxs = self.binning.get_bin_index(xvec)
out_basis = np.zeros((self.n_basis, len(xvec)))
for basis_idx in xrange(self.n_basis):
for poly_idx in xrange(self.n_polys):
xmask = poly_idxs == poly_idx
cx = xvec[xmask]
out_basis[basis_idx][xmask] = self.basis_polys[basis_idx][poly_idx](cx)
return out_basis
def regularize_ppol_basis_wrt_x_basis(self, xvec):
""" if xvec has at least 1+ncp+order distinct values in it this function will modify the basis polynomial coefficients so that they represent piecewise polynomial functions that are orthogonal with respect to the basis in x. (Because of the way the piecewise polynomials are generated they are originally orthogonal in the space of polynomial coefficients)
"""
cbasis = self.get_basis(xvec)
u, s, v = lna.svd(cbasis, full_matrices = False)
#TODO: use the orthogonalized basis vectors in v to set the self.basis_coefficients variable
#import pdb; pdb.set_trace()
def fit_piecewise_polynomial(x, y, order, control_points, bounds = (float("-inf"), float("inf")), regularity_constraints = None, centers = None, scales = "autoscale"):
if scales == "autoscale":
scales = np.ones(len(control_points)+1)*np.std(x)*(len(control_points)+1)
pp_gen = Regularity_Constrained_Piecewise_Polynomial_Basis(order, control_points=control_points, bounds = bounds, regularity_constraints = regularity_constraints, centers = centers, scales = scales)
gbasis = pp_gen.get_basis(x)
n_polys = len(control_points) + 1
n_coeffs = order+1
out_coeffs = np.zeros((n_polys, n_coeffs))
fit_coeffs = np.linalg.lstsq(gbasis.transpose(), y)[0]
for basis_idx in xrange(pp_gen.n_basis):
c_coeffs = pp_gen.basis_coefficients[basis_idx].reshape((n_polys, n_coeffs))
out_coeffs += c_coeffs*fit_coeffs[basis_idx]
return Piecewise_Polynomial(out_coeffs, control_points, centers = centers, scales = scales, bounds = bounds)
RCPPB = Regularity_Constrained_Piecewise_Polynomial_Basis #a shortcut for the reiculously long name
if __name__ == "__main__":
test_x = np.linspace(-1, 1, 4000)
test_y = test_x * 2 - test_x**2 + 3.14
ppol = fit_piecewise_polynomial(test_x, test_y, 3, np.array([-0.5, 0.5]))
fit_y = ppol(test_x)
if np.sum(np.abs(test_y-fit_y)) <= 1e-10:
print "PASSED exact fit test"
else:
print "FAILED exact fit test"
A = Centered_Scaled_Multinomial([1, 1], [[1, 0], [0, 1]], center = 0, scale = 0)
B = Centered_Scaled_Multinomial([1, 1], [[1, 0], [0, 2]], center = 0, scale = 0)
##orthogonalization
#randx = np.random.random(30)-0.5
#rcppb = RCPPB(3, [-0.5, 0.5])
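    # A small sketch exercising the multinomial algebra defined above: with
    # A = x0 + x1 and B = x0 + x1**2 (as constructed), at the point (1, 2)
    # we expect (A + B)(1, 2) = 8 and (A * B)(1, 2) = 3 * 5 = 15.
    C = A + B
    D = A * B
    print "multinomial sum at (1, 2): ", C([1.0, 2.0])
    print "multinomial product at (1, 2): ", D([1.0, 2.0])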
| apache-2.0 |
xray/xray | xarray/plot/utils.py | 1 | 26251 | import itertools
import textwrap
import warnings
from datetime import datetime
from inspect import getfullargspec
from typing import Any, Iterable, Mapping, Tuple, Union
import numpy as np
import pandas as pd
from ..core.options import OPTIONS
from ..core.utils import is_scalar
try:
import nc_time_axis # noqa: F401
nc_time_axis_available = True
except ImportError:
nc_time_axis_available = False
ROBUST_PERCENTILE = 2.0
_registered = False
def register_pandas_datetime_converter_if_needed():
# based on https://github.com/pandas-dev/pandas/pull/17710
global _registered
if not _registered:
pd.plotting.register_matplotlib_converters()
_registered = True
def import_matplotlib_pyplot():
"""Import pyplot as register appropriate converters."""
register_pandas_datetime_converter_if_needed()
import matplotlib.pyplot as plt
return plt
def _determine_extend(calc_data, vmin, vmax):
extend_min = calc_data.min() < vmin
extend_max = calc_data.max() > vmax
if extend_min and extend_max:
extend = "both"
elif extend_min:
extend = "min"
elif extend_max:
extend = "max"
else:
extend = "neither"
return extend
def _build_discrete_cmap(cmap, levels, extend, filled):
"""
Build a discrete colormap and normalization of the data.
"""
import matplotlib as mpl
if not filled:
# non-filled contour plots
extend = "max"
if extend == "both":
ext_n = 2
elif extend in ["min", "max"]:
ext_n = 1
else:
ext_n = 0
n_colors = len(levels) + ext_n - 1
pal = _color_palette(cmap, n_colors)
new_cmap, cnorm = mpl.colors.from_levels_and_colors(levels, pal, extend=extend)
# copy the old cmap name, for easier testing
new_cmap.name = getattr(cmap, "name", cmap)
# copy colors to use for bad, under, and over values in case they have been
# set to non-default values
try:
# matplotlib<3.2 only uses bad color for masked values
bad = cmap(np.ma.masked_invalid([np.nan]))[0]
except TypeError:
# cmap was a str or list rather than a color-map object, so there are
# no bad, under or over values to check or copy
pass
else:
under = cmap(-np.inf)
over = cmap(np.inf)
new_cmap.set_bad(bad)
# Only update under and over if they were explicitly changed by the user
# (i.e. are different from the lowest or highest values in cmap). Otherwise
# leave unchanged so new_cmap uses its default values (its own lowest and
# highest values).
if under != cmap(0):
new_cmap.set_under(under)
if over != cmap(cmap.N - 1):
new_cmap.set_over(over)
return new_cmap, cnorm
def _color_palette(cmap, n_colors):
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
colors_i = np.linspace(0, 1.0, n_colors)
if isinstance(cmap, (list, tuple)):
# we have a list of colors
cmap = ListedColormap(cmap, N=n_colors)
pal = cmap(colors_i)
elif isinstance(cmap, str):
# we have some sort of named palette
try:
# is this a matplotlib cmap?
cmap = plt.get_cmap(cmap)
pal = cmap(colors_i)
except ValueError:
# ValueError happens when mpl doesn't like a colormap, try seaborn
try:
from seaborn import color_palette
pal = color_palette(cmap, n_colors=n_colors)
except (ValueError, ImportError):
# or maybe we just got a single color as a string
cmap = ListedColormap([cmap], N=n_colors)
pal = cmap(colors_i)
else:
# cmap better be a LinearSegmentedColormap (e.g. viridis)
pal = cmap(colors_i)
return pal
# _determine_cmap_params is adapted from Seaborn:
# https://github.com/mwaskom/seaborn/blob/v0.6/seaborn/matrix.py#L158
# Used under the terms of Seaborn's license, see licenses/SEABORN_LICENSE.
def _determine_cmap_params(
plot_data,
vmin=None,
vmax=None,
cmap=None,
center=None,
robust=False,
extend=None,
levels=None,
filled=True,
norm=None,
_is_facetgrid=False,
):
"""
Use some heuristics to set good defaults for colorbar and range.
Parameters
==========
plot_data: Numpy array
Doesn't handle xarray objects
Returns
=======
cmap_params : dict
Use depends on the type of the plotting function
"""
import matplotlib as mpl
if isinstance(levels, Iterable):
levels = sorted(levels)
calc_data = np.ravel(plot_data[np.isfinite(plot_data)])
# Handle all-NaN input data gracefully
if calc_data.size == 0:
# Arbitrary default for when all values are NaN
calc_data = np.array(0.0)
# Setting center=False prevents a divergent cmap
possibly_divergent = center is not False
# Set center to 0 so math below makes sense but remember its state
center_is_none = False
if center is None:
center = 0
center_is_none = True
# Setting both vmin and vmax prevents a divergent cmap
if (vmin is not None) and (vmax is not None):
possibly_divergent = False
# Setting vmin or vmax implies linspaced levels
user_minmax = (vmin is not None) or (vmax is not None)
# vlim might be computed below
vlim = None
# save state; needed later
vmin_was_none = vmin is None
vmax_was_none = vmax is None
if vmin is None:
if robust:
vmin = np.percentile(calc_data, ROBUST_PERCENTILE)
else:
vmin = calc_data.min()
elif possibly_divergent:
vlim = abs(vmin - center)
if vmax is None:
if robust:
vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)
else:
vmax = calc_data.max()
elif possibly_divergent:
vlim = abs(vmax - center)
if possibly_divergent:
levels_are_divergent = (
isinstance(levels, Iterable) and levels[0] * levels[-1] < 0
)
# kwargs not specific about divergent or not: infer defaults from data
divergent = (
((vmin < 0) and (vmax > 0)) or not center_is_none or levels_are_divergent
)
else:
divergent = False
# A divergent map should be symmetric around the center value
if divergent:
if vlim is None:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
# Now add in the centering value and set the limits
vmin += center
vmax += center
# now check norm and harmonize with vmin, vmax
if norm is not None:
if norm.vmin is None:
norm.vmin = vmin
else:
if not vmin_was_none and vmin != norm.vmin:
raise ValueError("Cannot supply vmin and a norm with a different vmin.")
vmin = norm.vmin
if norm.vmax is None:
norm.vmax = vmax
else:
if not vmax_was_none and vmax != norm.vmax:
raise ValueError("Cannot supply vmax and a norm with a different vmax.")
vmax = norm.vmax
# if BoundaryNorm, then set levels
if isinstance(norm, mpl.colors.BoundaryNorm):
levels = norm.boundaries
# Choose default colormaps if not provided
if cmap is None:
if divergent:
cmap = OPTIONS["cmap_divergent"]
else:
cmap = OPTIONS["cmap_sequential"]
# Handle discrete levels
if levels is not None:
if is_scalar(levels):
if user_minmax:
levels = np.linspace(vmin, vmax, levels)
elif levels == 1:
levels = np.asarray([(vmin + vmax) / 2])
else:
# N in MaxNLocator refers to bins, not ticks
ticker = mpl.ticker.MaxNLocator(levels - 1)
levels = ticker.tick_values(vmin, vmax)
vmin, vmax = levels[0], levels[-1]
# GH3734
if vmin == vmax:
vmin, vmax = mpl.ticker.LinearLocator(2).tick_values(vmin, vmax)
if extend is None:
extend = _determine_extend(calc_data, vmin, vmax)
if levels is not None or isinstance(norm, mpl.colors.BoundaryNorm):
cmap, newnorm = _build_discrete_cmap(cmap, levels, extend, filled)
norm = newnorm if norm is None else norm
# vmin & vmax needs to be None if norm is passed
# TODO: always return a norm with vmin and vmax
if norm is not None:
vmin = None
vmax = None
return dict(
vmin=vmin, vmax=vmax, cmap=cmap, extend=extend, levels=levels, norm=norm
)
def _infer_xy_labels_3d(darray, x, y, rgb):
"""
Determine x and y labels for showing RGB images.
Attempts to infer which dimension is RGB/RGBA by size and order of dims.
"""
assert rgb is None or rgb != x
assert rgb is None or rgb != y
# Start by detecting and reporting invalid combinations of arguments
assert darray.ndim == 3
not_none = [a for a in (x, y, rgb) if a is not None]
if len(set(not_none)) < len(not_none):
raise ValueError(
"Dimension names must be None or unique strings, but imshow was "
"passed x=%r, y=%r, and rgb=%r." % (x, y, rgb)
)
for label in not_none:
if label not in darray.dims:
raise ValueError(f"{label!r} is not a dimension")
# Then calculate rgb dimension if certain and check validity
could_be_color = [
label
for label in darray.dims
if darray[label].size in (3, 4) and label not in (x, y)
]
if rgb is None and not could_be_color:
raise ValueError(
"A 3-dimensional array was passed to imshow(), but there is no "
"dimension that could be color. At least one dimension must be "
"of size 3 (RGB) or 4 (RGBA), and not given as x or y."
)
if rgb is None and len(could_be_color) == 1:
rgb = could_be_color[0]
if rgb is not None and darray[rgb].size not in (3, 4):
raise ValueError(
"Cannot interpret dim %r of size %s as RGB or RGBA."
% (rgb, darray[rgb].size)
)
# If rgb dimension is still unknown, there must be two or three dimensions
# in could_be_color. We therefore warn, and use a heuristic to break ties.
if rgb is None:
assert len(could_be_color) in (2, 3)
rgb = could_be_color[-1]
warnings.warn(
"Several dimensions of this array could be colors. Xarray "
"will use the last possible dimension (%r) to match "
"matplotlib.pyplot.imshow. You can pass names of x, y, "
"and/or rgb dimensions to override this guess." % rgb
)
assert rgb is not None
# Finally, we pick out the red slice and delegate to the 2D version:
return _infer_xy_labels(darray.isel(**{rgb: 0}), x, y)
def _infer_xy_labels(darray, x, y, imshow=False, rgb=None):
"""
Determine x and y labels. For use in _plot2d
darray must be a 2 dimensional data array, or 3d for imshow only.
"""
if (x is not None) and (x == y):
raise ValueError("x and y cannot be equal.")
if imshow and darray.ndim == 3:
return _infer_xy_labels_3d(darray, x, y, rgb)
if x is None and y is None:
if darray.ndim != 2:
raise ValueError("DataArray must be 2d")
y, x = darray.dims
elif x is None:
_assert_valid_xy(darray, y, "y")
x = darray.dims[0] if y == darray.dims[1] else darray.dims[1]
elif y is None:
_assert_valid_xy(darray, x, "x")
y = darray.dims[0] if x == darray.dims[1] else darray.dims[1]
else:
_assert_valid_xy(darray, x, "x")
_assert_valid_xy(darray, y, "y")
if (
all(k in darray._level_coords for k in (x, y))
and darray._level_coords[x] == darray._level_coords[y]
):
raise ValueError("x and y cannot be levels of the same MultiIndex")
return x, y
def _assert_valid_xy(darray, xy, name):
"""
make sure x and y passed to plotting functions are valid
"""
# MultiIndex cannot be plotted; no point in allowing them here
multiindex = {darray._level_coords[lc] for lc in darray._level_coords}
valid_xy = (
set(darray.dims) | set(darray.coords) | set(darray._level_coords)
) - multiindex
if xy not in valid_xy:
valid_xy_str = "', '".join(sorted(valid_xy))
raise ValueError(f"{name} must be one of None, '{valid_xy_str}'")
def get_axis(figsize=None, size=None, aspect=None, ax=None, **kwargs):
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("matplotlib is required for plot.utils.get_axis")
if figsize is not None:
if ax is not None:
raise ValueError("cannot provide both `figsize` and " "`ax` arguments")
if size is not None:
raise ValueError("cannot provide both `figsize` and " "`size` arguments")
_, ax = plt.subplots(figsize=figsize)
elif size is not None:
if ax is not None:
raise ValueError("cannot provide both `size` and `ax` arguments")
if aspect is None:
width, height = mpl.rcParams["figure.figsize"]
aspect = width / height
figsize = (size * aspect, size)
_, ax = plt.subplots(figsize=figsize)
elif aspect is not None:
raise ValueError("cannot provide `aspect` argument without `size`")
if kwargs and ax is not None:
raise ValueError("cannot use subplot_kws with existing ax")
if ax is None:
ax = plt.gca(**kwargs)
return ax
def label_from_attrs(da, extra=""):
"""Makes informative labels if variable metadata (attrs) follows
CF conventions."""
if da.attrs.get("long_name"):
name = da.attrs["long_name"]
elif da.attrs.get("standard_name"):
name = da.attrs["standard_name"]
elif da.name is not None:
name = da.name
else:
name = ""
if da.attrs.get("units"):
units = " [{}]".format(da.attrs["units"])
else:
units = ""
return "\n".join(textwrap.wrap(name + extra + units, 30))
def _interval_to_mid_points(array):
"""
Helper function which returns an array
with the Intervals' mid points.
"""
return np.array([x.mid for x in array])
def _interval_to_bound_points(array):
"""
Helper function which returns an array
with the Intervals' boundaries.
"""
array_boundaries = np.array([x.left for x in array])
array_boundaries = np.concatenate((array_boundaries, np.array([array[-1].right])))
return array_boundaries
def _interval_to_double_bound_points(xarray, yarray):
"""
Helper function to deal with a xarray consisting of pd.Intervals. Each
interval is replaced with both boundaries. I.e. the length of xarray
doubles. yarray is modified so it matches the new shape of xarray.
"""
xarray1 = np.array([x.left for x in xarray])
xarray2 = np.array([x.right for x in xarray])
xarray = list(itertools.chain.from_iterable(zip(xarray1, xarray2)))
yarray = list(itertools.chain.from_iterable(zip(yarray, yarray)))
return xarray, yarray
def _resolve_intervals_1dplot(xval, yval, xlabel, ylabel, kwargs):
"""
Helper function to replace the values of x and/or y coordinate arrays
containing pd.Interval with their mid-points or - for step plots - double
points which double the length.
"""
# Is it a step plot? (see matplotlib.Axes.step)
if kwargs.get("drawstyle", "").startswith("steps-"):
# Convert intervals to double points
if _valid_other_type(np.array([xval, yval]), [pd.Interval]):
raise TypeError("Can't step plot intervals against intervals.")
if _valid_other_type(xval, [pd.Interval]):
xval, yval = _interval_to_double_bound_points(xval, yval)
if _valid_other_type(yval, [pd.Interval]):
yval, xval = _interval_to_double_bound_points(yval, xval)
# Remove steps-* to be sure that matplotlib is not confused
del kwargs["drawstyle"]
# Is it another kind of plot?
else:
# Convert intervals to mid points and adjust labels
if _valid_other_type(xval, [pd.Interval]):
xval = _interval_to_mid_points(xval)
xlabel += "_center"
if _valid_other_type(yval, [pd.Interval]):
yval = _interval_to_mid_points(yval)
ylabel += "_center"
# return converted arguments
return xval, yval, xlabel, ylabel, kwargs
def _resolve_intervals_2dplot(val, func_name):
"""
Helper function to replace the values of a coordinate array containing
pd.Interval with their mid-points or - for pcolormesh - boundaries which
increases length by 1.
"""
label_extra = ""
if _valid_other_type(val, [pd.Interval]):
if func_name == "pcolormesh":
val = _interval_to_bound_points(val)
else:
val = _interval_to_mid_points(val)
label_extra = "_center"
return val, label_extra
def _valid_other_type(x, types):
"""
Do all elements of x have a type from types?
"""
return all(any(isinstance(el, t) for t in types) for el in np.ravel(x))
def _valid_numpy_subdtype(x, numpy_types):
"""
Is any dtype from numpy_types superior to the dtype of x?
"""
# If any of the types given in numpy_types is understood as numpy.generic,
# all possible x will be considered valid. This is probably unwanted.
for t in numpy_types:
assert not np.issubdtype(np.generic, t)
return any(np.issubdtype(x.dtype, t) for t in numpy_types)
def _ensure_plottable(*args):
"""
Raise exception if there is anything in args that can't be plotted on an
axis by matplotlib.
"""
numpy_types = [np.floating, np.integer, np.timedelta64, np.datetime64, np.bool_]
other_types = [datetime]
try:
import cftime
cftime_datetime = [cftime.datetime]
except ImportError:
cftime_datetime = []
other_types = other_types + cftime_datetime
for x in args:
if not (
_valid_numpy_subdtype(np.array(x), numpy_types)
or _valid_other_type(np.array(x), other_types)
):
raise TypeError(
"Plotting requires coordinates to be numeric, boolean, "
"or dates of type numpy.datetime64, "
"datetime.datetime, cftime.datetime or "
f"pandas.Interval. Received data of type {np.array(x).dtype} instead."
)
if (
_valid_other_type(np.array(x), cftime_datetime)
and not nc_time_axis_available
):
raise ImportError(
"Plotting of arrays of cftime.datetime "
"objects or arrays indexed by "
"cftime.datetime objects requires the "
"optional `nc-time-axis` (v1.2.0 or later) "
"package."
)
def _is_numeric(arr):
numpy_types = [np.floating, np.integer]
return _valid_numpy_subdtype(arr, numpy_types)
def _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params):
cbar_kwargs.setdefault("extend", cmap_params["extend"])
if cbar_ax is None:
cbar_kwargs.setdefault("ax", ax)
else:
cbar_kwargs.setdefault("cax", cbar_ax)
# dont pass extend as kwarg if it is in the mappable
if hasattr(primitive, "extend"):
cbar_kwargs.pop("extend")
fig = ax.get_figure()
cbar = fig.colorbar(primitive, **cbar_kwargs)
return cbar
def _rescale_imshow_rgb(darray, vmin, vmax, robust):
assert robust or vmin is not None or vmax is not None
# Calculate vmin and vmax automatically for `robust=True`
if robust:
if vmax is None:
vmax = np.nanpercentile(darray, 100 - ROBUST_PERCENTILE)
if vmin is None:
vmin = np.nanpercentile(darray, ROBUST_PERCENTILE)
# If not robust and one bound is None, calculate the default other bound
# and check that an interval between them exists.
elif vmax is None:
vmax = 255 if np.issubdtype(darray.dtype, np.integer) else 1
if vmax < vmin:
raise ValueError(
"vmin=%r is less than the default vmax (%r) - you must supply "
"a vmax > vmin in this case." % (vmin, vmax)
)
elif vmin is None:
vmin = 0
if vmin > vmax:
raise ValueError(
"vmax=%r is less than the default vmin (0) - you must supply "
"a vmin < vmax in this case." % vmax
)
# Scale interval [vmin .. vmax] to [0 .. 1], with darray as 64-bit float
# to avoid precision loss, integer over/underflow, etc with extreme inputs.
# After scaling, downcast to 32-bit float. This substantially reduces
# memory usage after we hand `darray` off to matplotlib.
darray = ((darray.astype("f8") - vmin) / (vmax - vmin)).astype("f4")
return np.minimum(np.maximum(darray, 0), 1)
def _update_axes(
ax,
xincrease,
yincrease,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
):
"""
Update axes with provided parameters
"""
if xincrease is None:
pass
elif xincrease and ax.xaxis_inverted():
ax.invert_xaxis()
elif not xincrease and not ax.xaxis_inverted():
ax.invert_xaxis()
if yincrease is None:
pass
elif yincrease and ax.yaxis_inverted():
ax.invert_yaxis()
elif not yincrease and not ax.yaxis_inverted():
ax.invert_yaxis()
# The default xscale, yscale needs to be None.
# If we set a scale it resets the axes formatters,
# This means that set_xscale('linear') on a datetime axis
# will remove the date labels. So only set the scale when explicitly
# asked to. https://github.com/matplotlib/matplotlib/issues/8740
if xscale is not None:
ax.set_xscale(xscale)
if yscale is not None:
ax.set_yscale(yscale)
if xticks is not None:
ax.set_xticks(xticks)
if yticks is not None:
ax.set_yticks(yticks)
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
def _is_monotonic(coord, axis=0):
"""
>>> _is_monotonic(np.array([0, 1, 2]))
True
>>> _is_monotonic(np.array([2, 1, 0]))
True
>>> _is_monotonic(np.array([0, 2, 1]))
False
"""
if coord.shape[axis] < 3:
return True
else:
n = coord.shape[axis]
delta_pos = coord.take(np.arange(1, n), axis=axis) >= coord.take(
np.arange(0, n - 1), axis=axis
)
delta_neg = coord.take(np.arange(1, n), axis=axis) <= coord.take(
np.arange(0, n - 1), axis=axis
)
return np.all(delta_pos) or np.all(delta_neg)
def _infer_interval_breaks(coord, axis=0, check_monotonic=False):
"""
>>> _infer_interval_breaks(np.arange(5))
array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
>>> _infer_interval_breaks([[0, 1], [3, 4]], axis=1)
array([[-0.5, 0.5, 1.5],
[ 2.5, 3.5, 4.5]])
"""
coord = np.asarray(coord)
if check_monotonic and not _is_monotonic(coord, axis=axis):
raise ValueError(
"The input coordinate is not sorted in increasing "
"order along axis %d. This can lead to unexpected "
"results. Consider calling the `sortby` method on "
"the input DataArray. To plot data with categorical "
"axes, consider using the `heatmap` function from "
"the `seaborn` statistical plotting library." % axis
)
deltas = 0.5 * np.diff(coord, axis=axis)
if deltas.size == 0:
deltas = np.array(0.0)
first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis)
last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis)
trim_last = tuple(
slice(None, -1) if n == axis else slice(None) for n in range(coord.ndim)
)
return np.concatenate([first, coord[trim_last] + deltas, last], axis=axis)
def _process_cmap_cbar_kwargs(
func,
data,
cmap=None,
colors=None,
cbar_kwargs: Union[Iterable[Tuple[str, Any]], Mapping[str, Any]] = None,
levels=None,
_is_facetgrid=False,
**kwargs,
):
"""
Parameters
==========
func : plotting function
data : ndarray,
Data values
Returns
=======
cmap_params
cbar_kwargs
"""
cbar_kwargs = {} if cbar_kwargs is None else dict(cbar_kwargs)
if "contour" in func.__name__ and levels is None:
levels = 7 # this is the matplotlib default
# colors is mutually exclusive with cmap
if cmap and colors:
raise ValueError("Can't specify both cmap and colors.")
# colors is only valid when levels is supplied or the plot is of type
# contour or contourf
if colors and (("contour" not in func.__name__) and (levels is None)):
raise ValueError("Can only specify colors with contour or levels")
# we should not be getting a list of colors in cmap anymore
# is there a better way to do this test?
if isinstance(cmap, (list, tuple)):
raise ValueError(
"Specifying a list of colors in cmap is deprecated. "
"Use colors keyword instead."
)
cmap_kwargs = {
"plot_data": data,
"levels": levels,
"cmap": colors if colors else cmap,
"filled": func.__name__ != "contour",
}
cmap_args = getfullargspec(_determine_cmap_params).args
cmap_kwargs.update((a, kwargs[a]) for a in cmap_args if a in kwargs)
if not _is_facetgrid:
cmap_params = _determine_cmap_params(**cmap_kwargs)
else:
cmap_params = {
k: cmap_kwargs[k]
for k in ["vmin", "vmax", "cmap", "extend", "levels", "norm"]
}
return cmap_params, cbar_kwargs
| apache-2.0 |
snurkabill/pydeeplearn | code/old-version/PCA.py | 3 | 7818 | __author__ = "Mihaela Rosca"
__contact__ = "[email protected]"
import heapq
import matplotlib.pyplot as plt
import numpy
import os
import scipy
import scipy.linalg
from os.path import isfile, join
from scipy import misc
# Import all common functions
from common import *
# The directory path to the images
PICTURE_PATH = "/pics/cambrdige_pics/"
# The current directory where the script is ran
currentDir = os.path.dirname(os.path.abspath(__file__))
"""
Converts the data to zero mean data.
"""
def convertDataToZeroMean(data):
means = scipy.mean(data, axis=0)
rows, cols = data.shape
zeroMean = numpy.zeros((rows, cols))
for i in xrange(rows):
zeroMean[i] = data[i] - means
assert zeroMean.shape == data.shape
return zeroMean
"""
Uses a heuristic to evaluate how many dimensions should the data be reduced
to.
Arguments:
eigenValues:
The eigen values of the covariance matrix, or numbers proportional to them.
Should be a numpy 1-D array.
Returns:
The dimension the data should be reduced to.
"""
def dimensionFromEigenIndividualVariance(eigenValues):
threshold = 0.01
dimension = 0
s = numpy.sum(eigenValues)
print "sum eigen" + str(s)
for eigen in eigenValues:
r = eigen / s
if r > threshold:
dimension += 1
return dimension
# requires the eigen values to be sorted before
def dimensionFromEigenTotalVariance(eigenValues):
threshold = 0.95
dimension = 0
s = numpy.sum(eigenValues)
print "sum eigen" + str(s)
current = 0
for eigen in eigenValues:
r = (eigen / s)
current += r
if current >= threshold:
break
dimension += 1
return dimension
"""
This method uses the Karhunen-Loeve transform to quickly compute the
eigenvalues of the data.
It is faster than the SVD method below, but is more prone to floating
point errors than the SVD one.
Arguments:
train:
Numpy array of arrays
dimension: the dimension to which to reduce the size of the data set.
Returns:
The principal components of the data.
"""
# Returns the principal components of the given training
# data by computing the principal eigen vectors of the
# covariance matrix of the data
def pca(train, dimension):
# Use the Karhunen-Loève transform to quickly compute
# the principal components.
rows, cols = train.shape
# Step1: Get the mean of each column of the data
# Ie create the average image
u = convertDataToZeroMean(train)
# Step2: Compute the eigen values of the U * U^T matrix
# the size of U * U^T is rows * rows (ie the number of data points you have
# in your training)
eigVals, eigVecs = scipy.linalg.eig(u.dot(u.T))
# Step3: Compute the eigen values of U^T*U from the eigen values of U * U^T
bigEigVecs = numpy.zeros((rows, cols))
for i in xrange(rows):
bigEigVecs[i] = u.T.dot(eigVecs[:, i])
# Step 4: Normalize the eigen vectors to get orthonormal components
bigEigVecs = map(lambda x: x / scipy.linalg.norm(x), bigEigVecs)
eigValsBigVecs = zip(eigVals, bigEigVecs)
sortedEigValsBigVecs = sorted(eigValsBigVecs, key=lambda x : x[0], reverse=True)
index = 0
if dimension == None:
# Get the eigen values
# Note that these are not the eigen values of the covariance matrix
# but the eigen values of U * U ^T
# however, this is fine because they just differ by a factor
# so the ratio between eigen values will be preserved
eigenValues = map(lambda x : x[0], sortedEigValsBigVecs)
dimension = dimensionFromEigenTotalVariance(eigenValues)
print "Using PCA dimension " + str(dimension)
result = numpy.empty((dimension, cols))  # one principal component per row, matching pcaWithSVD
for eigVal, vector in sortedEigValsBigVecs:
if index >= dimension:
break
if eigVal <=0:
print "Warning: Non-positive eigen value"
result[index, :] = vector
index = index + 1
return result
"""
Arguments:
train:
Numpy array of arrays
dimension: the dimension to which to reduce the size of the data set.
Returns:
The principal components of the data.
This method should be preferred over the above: it is well known that the
SVD methods are more stable than the ones that require the computation of
the eigen values and eigen vectors.
For more detail see:
http://math.stackexchange.com/questions/3869/what-is-the-intuitive-relationship-between-svd-and-pca
"""
def pcaWithSVD(train, dimension=None):
zeroMean = convertDataToZeroMean(train)
# SVD guaranteed that the singular values are in non-increasing order
# this means that the u's are already ordered as required, according
# to the magnitute of the eigen values
u, s, vh = scipy.linalg.svd(zeroMean)
if dimension == None:
# Get the eigen values from the singular values
eigenValues = s ** 2
dimension = dimensionFromEigenTotalVariance(eigenValues)
print "Using PCA dimension " + str(dimension)
return vh[:dimension]
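# Usage sketch for either routine (hedged; `train` is assumed to be an
# (nImages, nPixels) float array, e.g. built with imagesToVectors from common):
#
#     components = pca(train, dimension=20)   # eigen-decomposition route
#     componentsSvd = pcaWithSVD(train)       # SVD route, dimension chosen
#                                             # by the 95% variance heuristic
#
# Both return the components as row vectors of length nPixels, ready to be
# reshaped into eigen faces by getEigenFaces below.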
"""
Arguments:
pcaMethod: a method to use for PCA.
images: A python list of images that have to be of the same size.
dimension: the dimension to which to reduce the size of the data set.
Returns:
A tuple:
The first element of the tuple is formed from the eigen faces of given
images.
The second element of the tuple is formed from the vector version of the
eigen faces. This is kept for optimization reasons.
"""
def getEigenFaces(pcaMethod, images, dimension=None):
imgSize = images[0].shape
# this call should not be here: the code should assume that the images have
# been transformed to vectors before
imgs = imagesToVectors(images)
vectors = pcaMethod(imgs, dimension)
eigenFaces = map(lambda x: vectorToImage(x, imgSize), vectors)
return (eigenFaces, vectors)
def reduce(principalComponents, vectors):
assert len(principalComponents) > 0
print principalComponents[0].shape
principalComponents = np.array(principalComponents)
lowDimRepresentation = np.dot(vectors, principalComponents.T)
# lowDimRepresentation = map(lambda x : vectors.dot(x), principalComponents)
# sameDimRepresentation = \
# sum([ x * y for x, y in zip(principalComponents, lowDimRepresentation)])
# TODO: do this with einsum
sameDimRepresentation = lowDimRepresentation[:, np.newaxis] * principalComponents.T
sameDimRepresentation = sameDimRepresentation.sum(axis=2)
# TODO: create the proper thing here so that you can
# easily see what the output is
return (lowDimRepresentation, sameDimRepresentation)
"""
Reduces a 2D image represented by a numpy 2D array of integer values(pixels)
to a lower dimension, dictated by the number of principal components.
"""
def reduceImageToLowerDimensions(principalComponents, image2D):
assert len(principalComponents) > 0
size = principalComponents[0].shape
vector = vectorToImage(image2D, size)
lowDimRepresentation = map(lambda x : x.T.dot(vector), principalComponents)
sameDimRepresentation = \
sum([ x * y for x, y in zip(principalComponents, lowDimRepresentation)])
return (lowDimRepresentation, sameDimRepresentation)
def main():
# Load all the image files in the current directory
picFiles = []
path = currentDir + PICTURE_PATH
for root, dirs, files in os.walk(path):
if root != path:
picFiles += map(lambda x: os.path.join(root, x), files)
print len(picFiles)
imgs = map(lambda x: misc.imread(x, flatten=True), picFiles)
eigenFaces, principalComponents = getEigenFaces(pca, imgs)
# plt.imshow(eigenFaces[0], cmap=plt.cm.gray)
# plt.show()
lowDimRepresentation, sameDimRepresentation = \
reduceImageToLowerDimensions(principalComponents, imgs[0])
plt.imshow(imgs[0], cmap=plt.cm.gray)
plt.show()
image2D = vectorToImage(sameDimRepresentation, imgs[0].shape)
plt.imshow(image2D, cmap=plt.cm.gray)
plt.show()
print "done"
if __name__ == '__main__':
main() | bsd-3-clause |
jsilter/scipy | scipy/optimize/nonlin.py | 1 | 46408 | r"""
=================
Nonlinear solvers
=================
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
========
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
========
Small problem
-------------
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
Large problem
-------------
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print('Residual: %g' % abs(residual(sol)).max())
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <[email protected]>
# Distributed under the same license as Scipy.
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy.lib.six import callable, exec_
from scipy.lib.six import xrange
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
from scipy.linalg import get_blas_funcs
import inspect
from .linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
x0 : array_like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : ndarray
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None,
full_output=False, raise_exception=True):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
full_output : bool
If true, returns a dictionary `info` containing convergence
information.
raise_exception : bool
If True, a `NoConvergence` exception is raise if no solution is found.
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
http://www.siam.org/books/kelley/
"""
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.inf
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
eta_treshold = 0.1
eta = 1e-3
for n in xrange(maxiter):
status = condition.check(Fx, x, dx)
if status:
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x = x + dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
if gamma * eta**2 < eta_treshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % (
n, norm(Fx), s, eta))
sys.stdout.flush()
else:
if raise_exception:
raise NoConvergence(_array_like(x, x0))
else:
status = 2
if full_output:
info = {'nit': condition.iteration,
'fun': Fx,
'status': status,
'success': status == 1,
'message': {1: 'A solution was found at the specified '
'tolerance.',
2: 'The maximum number of iterations allowed '
'has been reached.'
}[status]
}
return _array_like(x, x0), info
else:
return _array_like(x, x0)
_set_doc(nonlin_solve)
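# Example sketch of calling the generic driver directly (hedged; the wrapper
# functions defined at the end of this module are the public entry points):
#
#     >>> def F(x):
#     ...     return x + 2*np.cos(x)
#     >>> sol, info = nonlin_solve(F, [0.0], jacobian='broyden1',
#     ...                          full_output=True)
#
# `info` carries the keys 'nit', 'fun', 'status', 'success' and 'message'
# assembled just above.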
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
class TerminationCondition(object):
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
self.norm = maxnorm if norm is None else norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return 1
if self.iter is not None:
# backwards compatibility with Scipy 0.6.0
return 2 * (self.iteration > self.iter)
# NB: condition must succeed for rtol=inf even if norm == 0
return int((f_norm <= self.f_tol
and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol
and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
"""
Common interface for Jacobians or Jacobian approximations.
The optional methods come useful when implementing trust region
etc. algorithms that often require evaluating transposes of the
Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
Returns A^H * v
rsolve : optional
Returns A^-H * v
matmat : optional
Returns A * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
self.update(x, F)
class InverseJacobian(object):
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
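# Small illustration (hedged): a plain 2-D array is wrapped so that solve()
# performs a dense linear solve against it.
#
#     >>> J = asjacobian(np.diag([2., 4.]))
#     >>> J.solve(np.array([2., 4.]))   # -> array([1., 1.])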
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# Autoscale the initial Jacobian parameter
# unless we have already guessed the solution.
normf0 = norm(f0)
if normf0:
self.alpha = 0.5*max(norm(x0), 1) / normf0
else:
self.alpha = 1.0
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [1]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='economic')
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False, compute_uv=True)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in xrange(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
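# Sanity sketch (hedged) for the low-rank representation documented above:
# after one rank-1 update, matvec should agree with the dense matrix produced
# by __array__.
#
#     >>> m = LowRankMatrix(2.0, 3, float)
#     >>> m.append(np.array([1., 0., 0.]), np.array([0., 1., 0.]))
#     >>> v = np.array([1., 2., 3.])
#     >>> np.allclose(m.matvec(v), np.array(m).dot(v))   # both give [4., 4., 6.]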
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is ``(-1/alpha)``.
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Takes an extra parameter, ``to_retain``, which determines the
number of SVD components to retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
def solve(self, f, tol=0):
r = self.Gm.matvec(f)
if not np.isfinite(r).all():
# singular; reset the Jacobian approximation
self.setup(self.last_x, self.last_f, self.func)
return self.Gm.matvec(f)
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
class BroydenSecond(BroydenFirst):
"""
Find a root of a function, using Broyden\'s second Jacobian approximation.
This method is also known as \"Broyden's bad method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df)
corresponding to Broyden's second method.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
The Jacobian is formed by seeking the 'best' solution in the space
spanned by the last `M` vectors. As a result, only an MxM matrix
inversion and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : int, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
%(params_extra)s
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, ie.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in xrange(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in xrange(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(-np.ones(self.shape[0])/self.alpha)
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can use also inverse Jacobians as (adaptive)
preconditioners. For example,
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=jac.aspreconditioner())
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the \"inner\" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
%(params_extra)s
See Also
--------
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by a finite difference:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
Scipy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [1]_,
and for the LGMRES sparse inverse method, see [2]_.
References
----------
.. [1] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2003).
.. [2] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restrt'] = inner_maxiter
self.method_kw['maxiter'] = 1
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See eg. Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
def _update_diff_step(self):
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
def matvec(self, v):
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
if 'tol' in self.method_kw:
sol, info = self.method(self.op, rhs, **self.method_kw)
else:
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and jacobian approx.
It inspects the keyword arguments of ``jac.__init__``, and allows to
use the same arguments in the wrapper function, in addition to the
keyword arguments of `nonlin_solve`
"""
import inspect
args, varargs, varkw, defaults = inspect.getargspec(jac.__init__)
kwargs = list(zip(args[-len(defaults):], defaults))
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
# Construct the wrapper function so that its keyword arguments
# are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec_(wrapper, ns)
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
| bsd-3-clause |
nlholdem/icodoom | ICO1/run_agentOld.py | 1 | 21393 | from __future__ import print_function
import numpy as np
import cv2
#import tensorflow as tf
import sys
sys.path.append('./agent')
sys.path.append('./deep_feedback_learning')
from agent.doom_simulator import DoomSimulator
#from agent.agent import Agent
from deep_ico.deep_feedback_learning import DeepFeedbackLearning
import threading
from matplotlib import pyplot as plt
from PIL import Image  # needed by savePosImage / saveNegImage below
width = 320
widthIn = 320
height = 240
heightIn = 240
nFiltersInput = 3
nFiltersHidden = 3
nHidden0 = 4
# nFiltersHidden = 0 means that the layer is linear without filters
minT = 3
maxT = 15
deepBP = DeepFeedbackLearning(width * height, [16, 10, 10], 1, nFiltersInput, nFiltersHidden, minT, maxT)
# init the weights
deepBP.getLayer(0).setConvolution(width, height)
deepBP.initWeights(1E-6, 1)
deepBP.setBias(1)
deepBP.setAlgorithm(DeepFeedbackLearning.ico)
deepBP.setLearningRate(1E-4)
deepBP.seedRandom(89)
deepBP.setUseDerivative(1)
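# Network set up above (per the constructor call): width*height pixel inputs,
# hidden layers of 16/10/10 units, and a single output that is read back later
# as the steering term icoSteer; the first layer is configured as a convolution
# over the image and learning uses the ICO rule with derivative inputs enabled.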
preprocess_input_images = lambda x: x / 255. - 0.5
sharpen = np.array((
[0, 1, 0],
[1, 4, 1],
[0, 1, 0]), dtype="int")
edge = np.array((
[0, 1, 0],
[1, -4, 1],
[0, 1, 0]), dtype="int")
plt.ion()
plt.show()
ln = False
def getWeights(neuron):
n_neurons = deepBP.getLayer(0).getNneurons()
n_inputs = deepBP.getLayer(0).getNeuron(neuron).getNinputs()
weights = np.zeros(n_inputs)
for i in range(n_inputs):
if deepBP.getLayer(0).getNeuron(neuron).getMask(i):
weights[i] = deepBP.getLayer(0).getNeuron(neuron).getAvgWeight(i)
else:
weights[i] = np.nan
return weights.reshape(height,width)
def plotWeights():
global ln
while True:
if ln:
ln.remove()
w1 = getWeights(0)
for i in range(1,deepBP.getLayer(0).getNneurons()):
w2 = getWeights(i)
w1 = np.where(np.isnan(w2),w1,w2)
ln = plt.imshow(w1,cmap='gray')
plt.draw()
print("*** UPDATE PLOT ***")
plt.pause(10)
def getColourImbalance(img, colour):
if(img.shape[0]) != 3:
print("Error in getColourImbalance: wrong number of image channels: ", img.shape)
return 0.
width = int(img.shape[2]/2)
height = int(img.shape[1]/2)
print ("width: ", width, "height", height)
avgLeft = np.average(img[:,:,:width], axis=1)
avgLeft = np.average(avgLeft, axis=1)
# avgLeft = np.dot(avgLeft, colour)
avgRight = np.average(img[:,:,width:], axis=1)
avgRight = np.average(avgRight, axis=1)
# avgRight = np.dot(avgRight, colour)
avgTop = np.average(img[:, :height, :], axis=1)
avgTop = np.average(avgTop, axis=1)
# avgTop = np.dot(avgTop, colour)
avgBottom = np.average(img[:, height:, :], axis=1)
avgBottom = np.average(avgBottom, axis=1)
# avgBottom = np.dot(avgBottom, colour)
print("avgLeft: ", avgLeft, " avgRight: ", avgRight, "avgTop", avgTop, "avgBottom", avgBottom)
return 1.
def getMaxColourPos(img, colour):
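# Finds the pixel whose RGB value is closest to `colour`, plus the bounding
# box (bottomLeft, topRight) of all pixels whose colour distance is at least
# 150 below the image mean; returns ([x, y], bottomLeft, topRight, strength),
# where strength is how much closer the best pixel is than the average one.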
img = np.array(img, dtype='float64')
width = int(img.shape[1])
height = int(img.shape[0])
# img[:,10,10] = [0,0,255]
diff = np.ones(img.shape)
diff[:,:,0] = colour[0]
diff[:,:,1] = colour[1]
diff[:,:,2] = colour[2]
diff = np.absolute(np.add(diff, (-1*img)))
cv2.imwrite("/home/paul/tmp/Images/Positive/diff-" + ".jpg", diff)
diff = np.sum(diff, axis=2)
cv2.imwrite("/home/paul/tmp/Images/Positive/diffGrey-" + ".jpg", diff)
indx = np.argmin(diff)
indx0 = int(indx / width)
indx1 = indx % width
pts = np.asarray(np.where((np.mean(diff) - diff) > 150))
if (pts.shape[1]>0):
bottomLeft = np.array([np.amin(pts[1]), np.amin(pts[0])])
topRight = np.array([np.amax(pts[1]), np.amax(pts[0])])
else:
bottomLeft = []
topRight = []
print("COLOUR: ", [indx1, indx0])
# cv2.imwrite("/home/paul/tmp/Images/Positive/rect-" + ".jpg", img)
# print ("Colour diff: ", np.mean(diff) - diff[indx0,indx1])
return np.array([indx1, indx0]), bottomLeft, topRight, np.mean(diff) - diff[indx0,indx1]
def savePosImage(curr_step, centre, x1, y1, x2, y2, _img, myFile, width, height):
print ("img shape: ", img2.shape)
myFile.write("/home/paul/tmp/Images/" + str(curr_step) + ".jpg"
+ " 1"
+ " " + str(x1) + " " + str(y1)
+ " " + str(x2) + " " + str(y2) + "\n")
img = np.zeros(_img.shape,dtype=np.uint8)
outImage = Image.fromarray(img)
outImage.save("/home/paul/tmp/Images/Positive/" + str(curr_step) + ".jpg")
def saveNegImage(curr_step, img2, myFile, width, height):
myFile.write("/home/paul/tmp/Images/" + str(curr_step) + ".jpg\n")
# img2 = np.rollaxis(img2, 0, 3)
img = Image.fromarray(img2)
img.save("/home/paul/tmp/Images/Negative/" + str(curr_step) + ".jpg")
def main():
## Simulator
simulator_args = {}
simulator_args['config'] = 'config/config.cfg'
simulator_args['resolution'] = (widthIn,heightIn)
simulator_args['frame_skip'] = 1
simulator_args['color_mode'] = 'RGB24'
simulator_args['game_args'] = "+name ICO +colorset 7"
## Agent
agent_args = {}
# preprocessing
preprocess_input_images = lambda x: x / 255. - 0.5
agent_args['preprocess_input_images'] = lambda x: x / 255. - 0.5
agent_args['preprocess_input_measurements'] = lambda x: x / 100. - 0.5
agent_args['num_future_steps'] = 6
pred_scale_coeffs = np.expand_dims(
(np.expand_dims(np.array([8., 40., 1.]), 1) * np.ones((1, agent_args['num_future_steps']))).flatten(), 0)
agent_args['meas_for_net_init'] = range(3)
agent_args['meas_for_manual_init'] = range(3, 16)
agent_args['resolution'] = (width,height)
# just use grayscale for nnet inputs
agent_args['num_channels'] = 1
# net parameters
agent_args['conv_params'] = np.array([(16, 5, 4), (32, 3, 2), (64, 3, 2), (128, 3, 2)],
dtype=[('out_channels', int), ('kernel', int), ('stride', int)])
agent_args['fc_img_params'] = np.array([(128,)], dtype=[('out_dims', int)])
agent_args['fc_meas_params'] = np.array([(128,), (128,), (128,)], dtype=[('out_dims', int)])
agent_args['fc_joint_params'] = np.array([(256,), (256,), (-1,)], dtype=[('out_dims', int)])
agent_args['target_dim'] = agent_args['num_future_steps'] * len(agent_args['meas_for_net_init'])
agent_args['n_actions'] = 7
# experiment arguments
agent_args['test_objective_params'] = (np.array([5, 11, 17]), np.array([1., 1., 1.]))
agent_args['history_length'] = 3
agent_args['history_length_ico'] = 3
historyLen = agent_args['history_length']
print ("HistoryLen: ", historyLen)
print('starting simulator')
simulator = DoomSimulator(simulator_args)
num_channels = simulator.num_channels
print('started simulator')
agent_args['state_imgs_shape'] = (
historyLen * num_channels, simulator.resolution[1], simulator.resolution[0])
agent_args['n_ffnet_inputs'] = 2*(agent_args['resolution'][0]*agent_args['resolution'][1])
agent_args['n_ffnet_hidden'] = np.array([50,5])
agent_args['n_ffnet_outputs'] = 1
agent_args['n_ffnet_act'] = 7
agent_args['n_ffnet_meas'] = simulator.num_meas
agent_args['learning_rate'] = 1E-4
if 'meas_for_net_init' in agent_args:
agent_args['meas_for_net'] = []
for ns in range(historyLen):
agent_args['meas_for_net'] += [i + simulator.num_meas * ns for i in agent_args['meas_for_net_init']]
agent_args['meas_for_net'] = np.array(agent_args['meas_for_net'])
else:
agent_args['meas_for_net'] = np.arange(historyLen * simulator.num_meas)
if len(agent_args['meas_for_manual_init']) > 0:
agent_args['meas_for_manual'] = np.array([i + simulator.num_meas * (historyLen - 1) for i in
agent_args[
'meas_for_manual_init']]) # current timestep is the last in the stack
else:
agent_args['meas_for_manual'] = []
agent_args['state_meas_shape'] = (len(agent_args['meas_for_net']),)
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
# agent = Agent(sess, agent_args)
# agent.load('/home/paul/Dev/GameAI/vizdoom_cig2017/icolearner/ICO1/checkpoints/ICO-8600')
# print("model loaded..")
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
img_buffer = np.zeros(
(historyLen, simulator.resolution[1], simulator.resolution[0], num_channels), dtype='uint8')
meas_buffer = np.zeros((historyLen, simulator.num_meas))
act_buffer = np.zeros((historyLen, 7))
act_buffer_ico = np.zeros((agent_args['history_length_ico'], 7))
curr_step = 0
old_step = -1
term = False
print ("state_meas_shape: ", meas_buffer.shape, " == ", agent_args['state_meas_shape'])
print ("act_buffer_shape: ", act_buffer.shape)
# ag = Agent(agent_args)
diff_y = 0
diff_x = 0
diff_z = 0
diff_theta = 0
iter = 1
epoch = 200
radialFlowLeft = 30.
radialFlowRight = 30.
radialFlowInertia = 0.4
radialGain = 4.
rotationGain = 50.
errorThresh = 10.
updatePtsFreq = 50
skipImage = 1
skipImageICO = 5
reflexGain = 0.01
oldHealth = 0.
# create masks for left and right visual fields - note that these only cover the upper half of the image
# this is to help prevent the tracking getting confused by the floor pattern
half_height = round(height/2)
half_width = round(width/2)
maskLeft = np.zeros([height, width], np.uint8)
maskLeft[half_height:, :half_width] = 1.
maskRight = np.zeros([height, width], np.uint8)
maskRight[half_height:, half_width:] = 1.
netErr = np.zeros((width,height))
# deepIcoEfference = Deep_ICO(simulator_args['resolution'][0] * simulator_args['resolution'][1] + 7, 10, 1)
nh = np.asarray([36,36])
# deepIcoEfference = Deep_ICO_Conv(1, [1], 1, Deep_ICO_Conv.conv)
# deepIcoEfference = Deep_ICO_Conv(simulator_args['resolution'][0] * simulator_args['resolution'][1] + 7,
# nh, simulator_args['resolution'][0] * simulator_args['resolution'][1], Deep_ICO_Conv.conv)
# deepIcoEfference.setLearningRate(0.01)
# deepIcoEfference.setAlgorithm(Deep_ICO.backprop)
# print ("Model type: ", "ff" if deepIcoEfference.getModelType() == 0 else "conv")
# deepIcoEfference.initWeights(1 / (np.sqrt(float(simulator_args['resolution'][0] * simulator_args['resolution'][1] + 7))))
# deepIcoEfference.initWeights(0.0)
outputImage = np.zeros(simulator_args['resolution'][0] * simulator_args['resolution'][1])
imageDiff = np.zeros(simulator_args['resolution'][0] * simulator_args['resolution'][1])
outputArray = np.zeros(1) #deepIcoEfference.getNoutputs())
lk_params = dict(winSize=(15, 15), maxLevel=2, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict(maxCorners=500, qualityLevel=0.03, minDistance=7, blockSize=7)
imgCentre = np.array([simulator_args['resolution'][0] / 2, simulator_args['resolution'][1] /2])
print ("Image centre: ", imgCentre)
simpleInputs1 = np.zeros((width, height))
simpleInputs2 = np.zeros((width, height))
input_buff = np.zeros((1,width*height))
target_buff = np.zeros((1,1))
t = threading.Thread(target=plotWeights)
t.start()
while not term:
if curr_step < historyLen:
curr_act = np.zeros(7).tolist()
img, meas, rwrd, term = simulator.step(curr_act)
print("Image: ", img.shape, " max: ", np.amax(img), " min: ", np.amin(img))
if curr_step == 0:
p0Left = cv2.goodFeaturesToTrack(img[:,:,0], mask=maskLeft, **feature_params)
p0Right = cv2.goodFeaturesToTrack(img[:,:,0], mask=maskRight, **feature_params)
img_buffer[curr_step % historyLen] = img
meas_buffer[curr_step % historyLen] = meas
act_buffer[curr_step % historyLen] = curr_act[:7]
else:
img1 = img_buffer[(curr_step-2) % historyLen,:,:,:]
img2 = img_buffer[(curr_step-1) % historyLen,:,:,:]
state = simulator._game.get_state()
stateImg = state.screen_buffer
greyImg1 = np.sum(img1, axis=0)
greyImg2 = cv2.resize(stateImg, (width,height))
greyImg2 = np.array(np.sum(greyImg2, axis=2)/3, dtype='uint8')
if(curr_step % updatePtsFreq == 0):
p0Left = cv2.goodFeaturesToTrack(img[:,:,0], mask=maskLeft, **feature_params)
p0Right = cv2.goodFeaturesToTrack(img[:,:,0], mask=maskRight, **feature_params)
p1Left, st, err = cv2.calcOpticalFlowPyrLK(img1[:,:,0], img2[:,:,0], p0Left, None, **lk_params)
p1Right, st, err = cv2.calcOpticalFlowPyrLK(img1[:,:,0], img2[:,:,0], p0Right, None, **lk_params)
flowLeft = (p1Left - p0Left)[:,0,:]
flowRight = (p1Right - p0Right)[:,0,:]
radialFlowTmpLeft = 0
radialFlowTmpRight = 0
for i in range(0, len(p0Left)):
radialFlowTmpLeft += ((p0Left[i,0,:] - imgCentre)).dot(flowLeft[i,:]) / float(len(p0Left))
for i in range(0, len(p0Right)):
radialFlowTmpRight += ((p0Right[i,0,:] - imgCentre)).dot(flowRight[i,:]) / float(len(p0Right))
rotation = act_buffer[(curr_step - 1) % historyLen][6]
forward = act_buffer[(curr_step - 1) % historyLen][3]
# keep separate radial errors for left and right fields
radialFlowLeft = radialFlowLeft + radialFlowInertia * (radialFlowTmpLeft - radialFlowLeft)
radialFlowRight = radialFlowRight + radialFlowInertia * (radialFlowTmpRight - radialFlowRight)
expectFlowLeft = radialGain * forward + (rotationGain * rotation if rotation < 0. else 0.)
expectFlowRight = radialGain * forward - (rotationGain * rotation if rotation > 0. else 0.)
flowErrorLeft = forward * (expectFlowLeft - radialFlowLeft) / (1. + rotationGain * np.abs(rotation))
flowErrorRight = forward * (expectFlowRight - radialFlowRight) / (1. + rotationGain * np.abs(rotation))
flowErrorLeft = flowErrorLeft if flowErrorLeft > 0. else 0.
flowErrorRight = flowErrorRight if flowErrorRight > 0. else 0.
icoSteer = 0.
if curr_step > 100:
health = meas[1]
# Don't run any networks when the player is dead!
if (health < 101. and health > 0.):
#print (curr_step)
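# Note: operator precedence makes the conditional expressions below group as
# `x if cond else (0. / reflexGain)`, so the reflexGain division never scales
# the positive branch, and icoInSteer reduces to the right-field error alone
# whenever that error exceeds the threshold; the apparent intent (difference
# of the clipped, reflex-scaled errors) is not what is computed.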
icoInLeft = (flowErrorLeft - errorThresh) if (flowErrorLeft - errorThresh) > 0. else 0. / reflexGain
icoInRight = (flowErrorRight - errorThresh) if (flowErrorRight - errorThresh) > 0. else 0. / reflexGain
icoInSteer = ((flowErrorRight - errorThresh) if (flowErrorRight - errorThresh) > 0. else 0. / reflexGain -
(flowErrorLeft - errorThresh) if (flowErrorLeft - errorThresh) > 0. else 0. / reflexGain)
centre1, bottomLeft1, topRight1, colourStrength1 = getMaxColourPos(img1, [255, 0, 0])
centre2, bottomLeft2, topRight2, colourStrength2 = getMaxColourPos(img2, [255, 0, 0])
colourSteer = centre2[0]
# get the setpoint in the -.9/+.9 range
simpleInputs1[:,:] = 0.1*np.random.rand(width, height)
simpleInputs2[:,:] = 0.1*np.random.rand(width, height)
sp = 1.8*(colourSteer - imgCentre[0]) / width
print ("ColourSteer: ", colourSteer, " ColourStrength: ", colourStrength2)
if(colourStrength2 > 150.):
#print ("ColourSteer: ", colourSteer, " ColourStrength: ", colourStrength)
#inputs[colourSteer,:] = colourStrength / 300.
simpleInputs2[bottomLeft2[0]:topRight2[0], bottomLeft2[1]:topRight2[1]] = 1.
#print(bottomLeft[0], bottomLeft[1], topRight[0], topRight[1], np.sum(inputs))
else:
colourStrength2 = 0.
sp =0
if (colourStrength1 > 150.):
simpleInputs1[bottomLeft1[0]:topRight1[0], bottomLeft1[1]:topRight1[1]] = 1.
netErr[:,:] = 0.
#deepBP.doStep(np.ndarray.flatten(inputs), np.ndarray.flatten(netErr))
#icoSteer = deepBP.getOutput(0)
#delta = sp - icoSteer
delta = 0.06 * colourStrength2 * (colourSteer - imgCentre[0])/width
#delta = 0.6 * max(min((icoInSteer), 5.), -5.)
#delta = 1. - icoSteer
#input_buff[0,:] = preprocess_input_images(np.ndarray.flatten(img2[2,:,:]))
#input_buff[0,:] = np.ndarray.flatten(inputs)
#input_buff[0,:] = np.concatenate([np.ndarray.flatten(greyImg1), np.ndarray.flatten(greyImg2)])
greyImg2 = cv2.filter2D(greyImg2, -1, edge)
input_buff[0,:] = np.ndarray.flatten(preprocess_input_images(greyImg2))
target_buff[0,0] = delta
if (False):
deepBP.setLearningRate(0.)
#net_output = np.ndarray.flatten(agent.test_ffnet(input_buff))[0]
#else:
#net_output = np.ndarray.flatten(agent.learn_ffnet(input_buff, target_buff))[0]
netErr[:,:] = delta
deepBP.doStep(preprocess_input_images(greyImg2.flatten()), netErr.flatten())
icoSteer = deepBP.getOutput(0)
#print ("In ", inputs[colourSteer], "Error: ", netErr[0,0], "Wt ", deepBP.getLayer(0).getNeuron(0).getWeight(int(colourSteer))
# , "WtOut ", deepBP.getLayer(1).getNeuron(0).getWeight(0)
#, " Out ", deepBP.getLayer(0).getNeuron(0).getOutput(), " NErr ", deepBP.getLayer(0).getNeuron(0).getError(), " OUT ", 40.*icoSteer
#, " OUTErr ", deepBP.getLayer(1).getNeuron(0).getError())
#deepBP.doStep(np.ndarray.flatten(preprocess_input_images(img_buffer[(curr_step - 1) % historyLen, 2, :, :])), np.ndarray.flatten(netErr))
# deepBP.doStep(np.ndarray.flatten(inputs), np.ndarray.flatten(netErr))
#deepBP.doStep(np.ndarray.flatten(preprocess_input_images(img_buffer[(curr_step - 1) % historyLen, 0, :, :])), [0.0001 * colourStrength * (colourSteer - imgCentre[0])])
#deepBP.doStep([(colourSteer - imgCentre[0])/width], [0.0001*colourStrength * (colourSteer - imgCentre[0])])
print (" ** ", curr_step, icoSteer, " ", delta, " ", colourStrength2)
#print (colourSteer, " In ", inputs[colourSteer], "Error: ", netErr[0,0], "Wt ", deepBP.getLayer(0).getNeuron(0).getWeight(int(colourSteer))
# , " NOut ", deepBP.getLayer(0).getNeuron(0).getOutput(), " NErr ", deepBP.getLayer(0).getNeuron(0).getError(), " OUT ", 40.*icoSteer
# , "OUTRefl ", diff_theta + 0.03 * colourStrength * (colourSteer - imgCentre[0])/width
# , " OUTErr ", deepBP.getLayer(1).getNeuron(0).getError())
diff_theta = 0.6 * max(min((icoInSteer), 5.), -5.)
diff_theta = diff_theta + 0.01 * colourStrength2 * (colourSteer - imgCentre[0])/width
diff_theta = diff_theta + 10. * icoSteer
#diff_theta = diff_theta + 20. * net_output
curr_act = np.zeros(7).tolist()
curr_act[0] = 0
curr_act[1] = 0
curr_act[2] = 0
curr_act[3] = curr_act[3] + diff_z
curr_act[3] = 0.
curr_act[4] = 0
curr_act[5] = 0
curr_act[6] = curr_act[6] + diff_theta
oldHealth = health
img, meas, rwrd, term = simulator.step(curr_act)
        if (meas is not None) and meas[0] > 30.:
meas[0] = 30.
if not term:
img_buffer[curr_step % historyLen] = img
meas_buffer[curr_step % historyLen] = meas
act_buffer[curr_step % historyLen] = curr_act[:7]
#if curr_step % epoch == 0:
# agent.save('/home/paul/Dev/GameAI/vizdoom_cig2017/icolearner/ICO1/checkpoints', curr_step)
# np.save('/home/paul/tmp/icoSteer-' + str(curr_step), icoSteer.weights)
# np.save('/home/paul/tmp/imageDiff-' + str(curr_step), imageDiff)
# np.save('/home/paul/tmp/icoDetect-' + str(curr_step), icoDetect.weights)
# icoSteer.saveInputs(curr_step)
curr_step += 1
simulator.close_game()
# ag.save('/home/paul/Dev/GameAI/vizdoom_cig2017/icolearner/ICO1/checkpoints/' + 'hack-' + str(iter))
if __name__ == '__main__':
main()
| gpl-3.0 |
glennq/scikit-learn | benchmarks/bench_sgd_regression.py | 61 | 5612 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
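            # Standardising features and targets keeps the fixed eta0 step sizes of the
            # SGD runs below comparable across problem sizes; SGD is sensitive to feature scale.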
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
plt.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("RMSE")
plt.title("Test error - %d features" % list_n_features[j])
i += 1
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("Time [sec]")
plt.title("Training time - %d features" % list_n_features[j])
i += 1
plt.subplots_adjust(hspace=.30)
plt.show()
| bsd-3-clause |
mfjb/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would cause division by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
andysim/psi4 | psi4/driver/qcdb/dbwrap.py | 3 | 174710 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import math
try:
import cPickle as pickle
except ImportError:
import pickle
import itertools
# from collections import defaultdict
try:
from collections import OrderedDict
except ImportError:
from oldpymodules import OrderedDict
from .exceptions import *
from .molecule import Molecule
from .modelchems import Method, BasisSet, Error, methods, bases, errors, pubs
from . import psiutil
from . import textables
def initialize_errors():
"""Form OrderedDict of all possible statistical measures set to None"""
error = OrderedDict()
for e in ['e', 'pe', 'pbe', 'pce']:
for m in ['pex', 'nex', 'max', 'min', 'm', 'ma', 'rms', 'std']:
error[m + e] = None
return error
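# The keys produced are the 8 statistics crossed with the 4 error types, e.g. 'me',
# 'mae', 'rmse', 'stde', 'mape', 'rmspbe', 'stdpce' (32 entries in all), each set to None.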
def initialize_errors_elaborate(e=None, pe=None, pbe=None, pce=None, extrema=True):
error = OrderedDict()
error['maxe'] = None if (e is None or not extrema) else e # LD_XA
error['mine'] = None if (e is None or not extrema) else e # LD_XI
error['me'] = None if e is None else 0.0 # LD_MS
error['mae'] = None if e is None else 0.0 # LD_MA
error['rmse'] = None if e is None else 0.0 # LD_RA
error['stde'] = None if e is None else 0.0
error['maxpe'] = None if (pe is None or not extrema) else pe # FD_XA
error['minpe'] = None if (pe is None or not extrema) else pe # FD_XI
error['mpe'] = None if pe is None else 0.0 # FD_MS
error['mape'] = None if pe is None else 0.0 # FD_MA
error['rmspe'] = None if pe is None else 0.0 # FD_RA
error['stdpe'] = None if pe is None else 0.0
error['maxpbe'] = None if (pbe is None or not extrema) else pbe # BD_XA
error['minpbe'] = None if (pbe is None or not extrema) else pbe # BD_XI
error['mpbe'] = None if pbe is None else 0.0 # BD_MS
error['mapbe'] = None if pbe is None else 0.0 # BD_MA
error['rmspbe'] = None if pbe is None else 0.0 # BD_RA
error['stdpbe'] = None if pbe is None else 0.0
error['maxpce'] = None if (pce is None or not extrema) else pce # BD_XA
error['minpce'] = None if (pce is None or not extrema) else pce # BD_XI
error['mpce'] = None if pce is None else 0.0 # BD_MS
error['mapce'] = None if pce is None else 0.0 # BD_MA
error['rmspce'] = None if pce is None else 0.0 # BD_RA
error['stdpce'] = None if pce is None else 0.0
return error
def average_errors(*args):
"""Each item in *args* should be an error dictionary. Performs
average-like operation over all items, which should be error
dictionaries, in *args*. Defined for ME, MAE, STDE, and their
relative-error variants. None returned for undefined statistics or
when an item is missing.
"""
Ndb = float(len(args))
avgerror = initialize_errors()
try:
avgerror['pexe'] = max([x['pexe'] for x in args])
avgerror['nexe'] = min([x['nexe'] for x in args])
avgerror['maxe'] = max([x['maxe'] for x in args], key=lambda x: abs(x))
avgerror['mine'] = min([x['mine'] for x in args], key=lambda x: abs(x))
avgerror['me'] = sum([x['me'] for x in args]) / Ndb
avgerror['mae'] = sum([x['mae'] for x in args]) / Ndb
avgerror['rmse'] = sum([x['rmse'] for x in args]) / Ndb # TODO: unsure of op validity
avgerror['stde'] = math.sqrt(sum([x['stde'] ** 2 for x in args]) / Ndb)
avgerror['pexpe'] = max([x['pexpe'] for x in args])
avgerror['nexpe'] = min([x['nexpe'] for x in args])
avgerror['maxpe'] = max([x['maxpe'] for x in args], key=lambda x: abs(x))
avgerror['minpe'] = min([x['minpe'] for x in args], key=lambda x: abs(x))
avgerror['mpe'] = sum([x['mpe'] for x in args]) / Ndb
avgerror['mape'] = sum([x['mape'] for x in args]) / Ndb
avgerror['rmspe'] = sum([x['rmspe'] for x in args]) / Ndb # TODO: unsure of op validity
avgerror['stdpe'] = math.sqrt(sum([x['stdpe'] * x['stdpe'] for x in args]) / Ndb)
avgerror['pexpbe'] = max([x['pexpbe'] for x in args])
avgerror['nexpbe'] = min([x['nexpbe'] for x in args])
avgerror['maxpbe'] = max([x['maxpbe'] for x in args], key=lambda x: abs(x))
avgerror['minpbe'] = min([x['minpbe'] for x in args], key=lambda x: abs(x))
avgerror['mpbe'] = sum([x['mpbe'] for x in args]) / Ndb
avgerror['mapbe'] = sum([x['mapbe'] for x in args]) / Ndb
avgerror['rmspbe'] = sum([x['rmspbe'] for x in args]) / Ndb # TODO: unsure of op validity
avgerror['stdpbe'] = math.sqrt(sum([x['stdpbe'] * x['stdpbe'] for x in args]) / Ndb)
avgerror['pexpce'] = max([x['pexpce'] for x in args])
avgerror['nexpce'] = min([x['nexpce'] for x in args])
avgerror['maxpce'] = max([x['maxpce'] for x in args], key=lambda x: abs(x))
avgerror['minpce'] = min([x['minpce'] for x in args], key=lambda x: abs(x))
avgerror['mpce'] = sum([x['mpce'] for x in args]) / Ndb
avgerror['mapce'] = sum([x['mapce'] for x in args]) / Ndb
avgerror['rmspce'] = sum([x['rmspce'] for x in args]) / Ndb # TODO: unsure of op validity
avgerror['stdpce'] = math.sqrt(sum([x['stdpce'] * x['stdpce'] for x in args]) / Ndb)
except TypeError:
pass
return avgerror
def format_errors(err, mode=1):
"""From error dictionary *err*, returns a LaTeX-formatted string,
after handling None entries.
"""
onedecimal = r"""{0:8.1f}"""
twodecimal = r"""{0:8.2f}"""
threedecimal = r"""{0:12.3f}"""
fourdecimal = r"""{0:12.4f}"""
shortblank = r"""{0:8s}""".format('')
longblank = r"""{0:12s}""".format('')
if mode == 1:
me = ' ----' if err['me'] is None else '%+.2f' % (err['me'])
stde = '----' if err['stde'] is None else '%.2f' % (err['stde'])
mae = ' ----' if err['mae'] is None else '%6.2f' % (err['mae'])
mape = ' ---- ' if err['mape'] is None else '%6.1f\%%' % (100 * err['mape'])
mapbe = ' ---- ' if err['mapbe'] is None else '%6.1f\%%' % (100 * err['mapbe'])
mapce = ' ---- ' if err['mapce'] is None else '%6.1f\%%' % (100 * err['mapce'])
text = """$\{%s; %s\}$ %s %s %s""" % \
(me, stde, mae, mape, mapce)
return text
if mode == 2:
sdict = OrderedDict()
for lbl in ['pexe', 'nexe', 'maxe', 'mine', 'me', 'mae', 'rmse', 'stde']:
sdict[lbl] = ' ----' if err[lbl] is None else fourdecimal.format(err[lbl])
for lbl in ['pexpe', 'nexpe', 'maxpe', 'minpe', 'mpe', 'mape', 'rmspe', 'stdpe',
'pexpbe', 'nexpbe', 'maxpbe', 'minpbe', 'mpbe', 'mapbe', 'rmspbe', 'stdpbe',
'pexpce', 'nexpce', 'maxpce', 'minpce', 'mpce', 'mapce', 'rmspce', 'stdpce']:
sdict[lbl] = ' ----' if err[lbl] is None else threedecimal.format(100 * err[lbl])
text = """nex: {nexe}{nexpe}{nexpbe}{nexpce}\n""" \
"""pex: {pexe}{pexpe}{pexpbe}{pexpce}\n""" \
"""min: {mine}{minpe}{minpbe}{minpce}\n""" \
"""max: {maxe}{maxpe}{maxpbe}{maxpce}\n""" \
"""m: {me}{mpe}{mpbe}{mpce}\n""" \
"""ma: {mae}{mape}{mapbe}{mapce}\n""" \
"""rms: {rmse}{rmspe}{rmspbe}{rmspce}\n""" \
"""std: {stde}{stdpe}{stdpbe}{stdpce}\n""".format(**sdict)
return text
if mode == 3:
sdict = OrderedDict()
# shortblanks changed from empty strings Aug 2015
for lbl in ['pexe', 'nexe', 'maxe', 'mine', 'me', 'mae', 'rmse', 'stde']:
sdict[lbl] = shortblank if err[lbl] is None else twodecimal.format(err[lbl])
for lbl in ['pexpe', 'nexpe', 'maxpe', 'minpe', 'mpe', 'mape', 'rmspe', 'stdpe',
'pexpbe', 'nexpbe', 'maxpbe', 'minpbe', 'mpbe', 'mapbe', 'rmspbe', 'stdpbe',
'pexpce', 'nexpce', 'maxpce', 'minpce', 'mpce', 'mapce', 'rmspce', 'stdpce']:
sdict[lbl] = shortblank if err[lbl] is None else onedecimal.format(100 * err[lbl])
return sdict
def string_contrast(ss):
"""From an array of strings, *ss*, returns maximum common prefix
string, maximum common suffix string, and array of middles.
"""
s = [item + 'q' for item in ss if item is not None]
short = min(s, key=len)
for ib in range(len(short)):
if not all([mc[ib] == short[ib] for mc in s]):
preidx = ib
break
else:
preidx = 0
for ib in range(len(short)):
ie = -1 * (ib + 1)
if not all([mc[ie] == short[ie] for mc in s]):
sufidx = ie + 1
break
else:
sufidx = -1 * (len(short))
miditer = iter([mc[preidx:sufidx] for mc in s])
prefix = short[:preidx]
suffix = short[sufidx:-1]
middle = ['' if mc is None else next(miditer) for mc in ss]
return prefix, suffix, middle
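# Illustrative usage (arbitrary labels): the common prefix and suffix are factored out
# and the varying middles are returned in input order, e.g.
#     string_contrast(['MP2-CP-adz', 'MP2-CP-atz'])  ->  ('MP2-CP-a', 'z', ['d', 't'])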
def oxcom(lst):
"""Returns gramatical comma separated string of *lst*."""
lst = [str(l) for l in lst]
if not lst:
return ''
elif len(lst) == 1:
return lst[0]
elif len(lst) == 2:
return ' and '.join(lst)
else:
return ', and '.join([', '.join(lst[:-1]), lst[-1]])
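# Illustrative usage (arbitrary labels):
#     oxcom(['MP2'])                    -> 'MP2'
#     oxcom(['MP2', 'CCSD'])            -> 'MP2 and CCSD'
#     oxcom(['MP2', 'CCSD', 'CCSD(T)']) -> 'MP2, CCSD, and CCSD(T)'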
def cure_weight(refrxn, refeq, rrat, xi=0.2):
"""
:param refeq: value of benchmark for equilibrium Reaction
:param rrat: ratio of intermonomer separation for Reaction to equilibrium Reaction
:param xi: parameter
:return: weight for CURE
"""
sigma = xi * abs(refeq) / (rrat ** 3)
weight = max(abs(refrxn), sigma)
return weight
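# Worked sketch with arbitrary numbers: near equilibrium the sigma floor dominates small
# reference values, e.g. cure_weight(refrxn=-0.05, refeq=-5.0, rrat=1.0, xi=0.2) gives
# sigma = 1.0 and weight = 1.0, while cure_weight(refrxn=-0.5, refeq=-5.0, rrat=2.0)
# gives sigma = 0.125 and weight = 0.5.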
def balanced_error(refrxn, refeq, rrat, m=0.03, p=10.0):
"""
:param refrxn:
:param refeq:
:param rrat:
:param m: minimum permitted weight for a point
:param p: multiples of abs(refeq) above refeq to which zero-line in head is displaced
:return:
"""
one = float(1)
q = one if rrat >= one else p
qm1perat = q - 1 + refrxn / refeq
weight = max(m, qm1perat / q)
mask = weight * q / abs(qm1perat)
return mask, weight
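# Worked sketch with arbitrary numbers: on the stretched side (rrat >= 1) q is 1, so
# balanced_error(refrxn=-1.0, refeq=-5.0, rrat=1.5) gives qm1perat = 0.2, weight = 0.2
# and mask = 1.0; when the weight is clipped at m the mask shrinks, e.g.
# balanced_error(refrxn=0.5, refeq=-5.0, rrat=1.5) gives weight = 0.03 and mask = 0.3.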
def fancify_mc_tag(mc, latex=False):
"""From the usual MTD-opt1_opt2-bas model chemistry identifier, return
string based on fullname, if *latex* is False or latex if *latex* is True.
"""
try:
mtd, mod, bas = mc.split('-')
except ValueError:
text = mc
else:
if latex:
text = r"""%20s / %-20s %s""" % (methods[mtd].latex, bases[bas].latex, mod)
else:
text = r"""%20s / %s, %s""" % (methods[mtd].fullname, bases[bas].fullname, mod)
return text
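# Illustrative behaviour: a tag that does not split into exactly three '-' fields is
# returned unchanged, e.g. fancify_mc_tag('S22B') == 'S22B'; a conforming tag such as
# 'MP2-CP-adz' is rendered from the qcdb.modelchems methods/bases tables, so the exact
# output depends on the entries defined there.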
class ReactionDatum(object):
"""Piece of quantum chemical information that describes a qcdb.Reaction object.
"""
def __init__(self, dbse, rxn, method, mode, basis, value, units='kcal/mol', citation=None, doi=None, comment=None):
        # database reaction name, e.g., 'S22-2'
self.dbrxn = dbse + '-' + str(rxn)
# qcdb.Method
self.method = method
# mode, e.g., unCP, CP, RLX, etc.
self.mode = mode
# qcdb.BasisSet
self.basis = basis
# numerical value for reaction
self.value = float(value)
# energy unit attached to value, defaults to kcal/mol
self.units = units
# publication citation of value
self.citation = citation
# digital object identifier for publication (maybe this should be doi of datum, not of pub?)
self.doi = doi
# addl comments
self.comment = comment
@classmethod
def library_modelchem(cls, dbse, rxn, method, mode, basis, value, units='kcal/mol', citation=None, doi=None,
comment=None):
"""Constructor when method and basis are strings corresponding to
qcdb.Method and qcdb.BasisSet already defined in methods and bases.
"""
# computational method
try:
tmp_method = methods[method.upper()]
except KeyError as e:
raise ValidationError("""Invalid ReactionDatum method %s: %s""" % (method, e))
# computational basis set
try:
tmp_basis = bases[basis.lower()]
except KeyError as e:
raise ValidationError("""Invalid ReactionDatum basis %s: %s""" % (basis, e))
# publication
if citation is None:
tmp_pub = citation
else:
try:
tmp_pub = pubs[citation.lower()]
except KeyError as e:
raise ValidationError("""Invalid ReactionDatum publication %s: %s""" % (citation, e))
return cls(dbse, rxn, tmp_method, mode, tmp_basis, value, units, citation=tmp_pub, doi=doi, comment=comment)
def __str__(self):
text = ''
text += """ ==> ReactionDatum <==\n\n"""
text += """ Database reaction: %s\n""" % (self.dbrxn)
text += """ Method: %s\n""" % (self.method.fullname)
text += """ Mode: %s\n""" % (self.mode)
text += """ Basis: %s\n""" % (self.basis.fullname)
text += """ Value: %f [%s]\n""" % (self.value, self.units)
text += """ Citation: %s %s\n""" % (self.citation.name, self.citation.doi)
text += """ DOI: %s\n""" % (self.doi)
text += """ Comment: %s\n""" % (self.comment)
text += """\n"""
return text
class Subset(object):
"""Affiliated qcdb.Reaction-s
"""
def __init__(self, name, hrxn, tagl=None, axis=None):
# identifier
self.name = name
        # array of reaction names
self.hrxn = hrxn
# description line
self.tagl = tagl
# mathematical relationships of reactions
self.axis = OrderedDict()
def __str__(self):
text = ''
text += """ ==> %s Subset <==\n\n""" % (self.name)
text += """ Tagline: %s\n""" % (self.tagl)
text += """ %20s""" % ('Reactions')
for ax in self.axis.keys():
text += """ %8s""" % (ax)
text += """\n"""
for ix in range(len(self.hrxn)):
text += """ %20s""" % (str(self.hrxn[ix]))
for ax in self.axis.values():
text += """ %8.3f""" % (ax[ix])
text += """\n"""
text += """\n"""
return text
class Reagent(object):
"""Chemical entity only slightly dresed up from qcdb.Molecule.
"""
def __init__(self, name, mol, tagl=None, comment=None):
# full name, e.g., 'S22-2-dimer' or 'NBC1-BzMe-8.0-monoA-CP' or 'HTBH-HCl-reagent'
self.name = name
# qcdb.Molecule
try:
self.NRE = mol.nuclear_repulsion_energy()
except AttributeError:
raise ValidationError("""Reagent must be instantiated with qcdb.Molecule object.""")
else:
self.mol = mol.create_psi4_string_from_molecule()
# description line
self.tagl = tagl
# # addl comments
# self.comment = comment
# # fragmentation
# self.fragments = mol.fragments
# # frag activation
# self.frtype = mol.fragment_types
# # frag charge
# self.frchg = mol.fragment_charges
# # frag multiplicity
# self.frmult = mol.fragment_multiplicities
self.charge = mol.molecular_charge()
def __str__(self):
text = ''
text += """ ==> %s Reagent <==\n\n""" % (self.name)
text += """ Tagline: %s\n""" % (self.tagl)
# text += """ Comment: %s\n""" % (self.comment)
text += """ NRE: %f\n""" % (self.NRE)
# text += """ Charge: %+d\n"""
# text += """ Fragments: %d\n""" % (len(self.fragments))
# text += """ FrgNo Actv Chg Mult AtomRange\n"""
# for fr in range(len(self.fragments)):
# text += """ %-4d %1s %+2d %2d %s\n""" % (fr + 1,
# '*' if self.frtype[fr] == 'Real' else '',
# self.frchg[fr], self.frmult[fr], self.fragments[fr])
text += """ Molecule: \n%s""" % (self.mol)
text += """\n"""
return text
class Reaction(object):
"""
"""
def __init__(self, name, dbse, indx, tagl=None, latex=None, color='black', comment=None):
# name, e.g., '2' or 'BzMe-8.0'
self.name = name
# database reaction name, e.g., 'S22-2' or 'NBC1-BzMe-8.0'
self.dbrxn = dbse + '-' + str(name)
# numerical index of reaction
self.indx = indx
# description line
self.tagl = tagl
# latex description
self.latex = latex
# addl comments
self.comment = comment
# reaction matrices, specifying reagent contributions per reaction
self.rxnm = {}
# qcdb.ReactionDatum objects of quantum chemical data pertaining to reaction
self.data = {}
# benchmark qcdb.ReactionDatum
self.benchmark = None
# color for plotting
self.color = color
def __str__(self):
text = ''
text += """ ==> %s Reaction <==\n\n""" % (self.name)
text += """ Database reaction: %s\n""" % (self.dbrxn)
text += """ Index: %s\n""" % (self.indx)
text += """ LaTeX representation: %s\n""" % (self.latex)
text += """ Tagline: %s\n""" % (self.tagl)
text += """ Comment: %s\n""" % (self.comment)
if self.benchmark is None:
text += """ Benchmark: %s\n""" % ('UNDEFINED')
else:
text += """ Benchmark: %f\n""" % (self.data[self.benchmark].value)
text += """ Color: %s\n""" % (str(self.color))
text += """ Reaction matrix:\n"""
for mode, rxnm in self.rxnm.items():
text += """ %s\n""" % (mode)
for rgt, coeff in rxnm.items():
text += """ %3d %s\n""" % (coeff, rgt.name)
text += """ Data:\n"""
for label, datum in sorted(self.data.items()):
text += """ %8.2f %s\n""" % (datum.value, label)
text += """\n"""
return text
def compute_errors(self, benchmark='default', mcset='default', failoninc=True, verbose=False):
"""For all data or modelchem subset *mcset*, computes raw reaction
errors between *modelchem* and *benchmark* model chemistries.
Returns error if model chemistries are missing for any reaction in
subset unless *failoninc* set to False, whereupon returns partial.
Returns dictionary of reaction labels and error forms.
"""
if mcset == 'default':
lsslist = self.data.keys()
elif callable(mcset):
# mcset is function that will generate subset of HRXN from sset(self)
lsslist = [mc for mc in self.data.keys() if mc in mcset(self)] # untested
else:
# mcset is array containing modelchemistries
lsslist = [mc for mc in self.data.keys() if mc in mcset]
# assemble dict of qcdb.Reaction objects from array of reaction names
lsset = OrderedDict()
for mc in lsslist:
lsset[mc] = self.data[mc]
lbench = self.benchmark if benchmark == 'default' else benchmark
try:
mcGreater = self.data[lbench].value
except KeyError as e:
raise ValidationError("""Reaction %s missing benchmark datum %s.""" % (self.name, str(e)))
err = {}
for label, datum in lsset.items():
try:
mcLesser = datum.value
except KeyError as e:
if failoninc:
raise ValidationError("""Reaction %s missing datum %s.""" % (label, str(e)))
else:
continue
err[label] = [mcLesser - mcGreater,
(mcLesser - mcGreater) / abs(mcGreater),
(mcLesser - mcGreater) / abs(mcGreater)] # TODO define BER
if verbose:
print("""p = %6.2f, pe = %6.1f%%, bpe = %6.1f%% modelchem %s.""" %
(err[label][0], 100 * err[label][1], 100 * err[label][2], label))
return err
def plot(self, benchmark='default', mcset='default',
failoninc=True, verbose=False, color='sapt',
xlimit=4.0, labeled=True, view=True,
mousetext=None, mouselink=None, mouseimag=None, mousetitle=None, mousediv=None,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Computes individual errors over model chemistries in *mcset* (which
may be default or an array or a function generating an array) versus
*benchmark*. Thread *color* can be 'rgb' for old coloring, a color
name or 'sapt' for spectrum coloring.
*saveas* conveys directory ('/') and/or filename for saving the
resulting plot. File extension is not accessible, but *graphicsformat*
array requests among 'png', 'pdf', and 'eps' formats. *relpath*
forces paths to saved files to be relative to current directory,
rather than absolute paths for returned code and file dictionary.
Prepares thread diagram instructions and either executes them if
matplotlib available (Canopy or Anaconda) or prints them. Returns a
dictionary of all saved plot filenames. If any of *mousetext*, *mouselink*,
or *mouseimag* is specified, htmlcode will be returned with an image map of
slats to any of text, link, or image, respectively.
"""
# compute errors
dbse = self.dbrxn.split('-')[0]
indiv = self.compute_errors(benchmark=benchmark, mcset=mcset,
failoninc=failoninc, verbose=verbose)
# repackage
dbdat = []
for mc in indiv.keys():
dbdat.append({'db': dbse,
'show': fancify_mc_tag(mc),
'sys': mc,
'color': self.color,
'data': [indiv[mc][0]]})
mae = None # [errors[ix][self.dbse]['mae'] for ix in index]
mape = None # [100 * errors[ix][self.dbse]['mape'] for ix in index]
# form unique filename
# ixpre, ixsuf, ixmid = string_contrast(index)
# title = self.dbse + ' ' + ixpre + '[]' + ixsuf
title = self.dbrxn
labels = ['']
# generate matplotlib instructions and call or print
try:
from . import mpl
import matplotlib.pyplot as plt
except ImportError:
# if not running from Canopy, print line to execute from Canopy
print("""filedict, htmlcode = mpl.threads(%s,\n color='%s',\n title='%s',\n labels=%s,\n mae=%s,\n mape=%s\n xlimit=%s\n labeled=%s\n saveas=%s\n mousetext=%s\n mouselink=%s\n mouseimag=%s\n mousetitle=%s,\n mousediv=%s,\n relpath=%s\n graphicsformat=%s)\n\n""" %
(dbdat, color, title, labels, mae, mape, str(xlimit),
repr(labeled), repr(saveas), repr(mousetext), repr(mouselink), repr(mouseimag),
repr(mousetitle), repr(mousediv), repr(relpath), repr(graphicsformat)))
else:
# if running from Canopy, call mpl directly
filedict, htmlcode = mpl.threads(dbdat, color=color, title=title, labels=labels, mae=mae, mape=mape,
xlimit=xlimit, labeled=labeled, view=view,
mousetext=mousetext, mouselink=mouselink,
mouseimag=mouseimag, mousetitle=mousetitle, mousediv=mousediv,
saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
return filedict, htmlcode
class WrappedDatabase(object):
"""Wrapper class for raw Psi4 database modules that does some validation
of contents, creates member data and accessors for database structures,
defines error computation, and handles database subsets. Not to be used
directly-- see qcdb.Database for handling single or multiple
qdcb.WrappedDatabase objects and defining nice statistics, plotting, and
table functionalities.
>>> asdf = qcdb.WrappedDatabase('Nbc10')
"""
def __init__(self, dbname, pythonpath=None):
"""Instantiate class with case insensitive name *dbname*. Module
search path can be prepended with *pythonpath*.
"""
#: internal name of database
#:
#: >>> print asdf.dbse
#: 'NBC1'
self.dbse = None
#: description line
#:
#: >>> print asdf.tagl
#: 'interaction energies of dissociation curves for non-bonded systems'
self.tagl = None
#: OrderedDict of reactions/members
#:
#: >>> print asdf.hrxn.keys()
#: ['BzBz_S-3.2', 'BzBz_S-3.3', ... 'BzBz_PD36-2.8', 'BzBz_PD36-3.0']
self.hrxn = None
#: dict of reagents/geometries
#:
#: >>> print asdf.hrgt.keys()
#: ['NBC1-BzBz_PD32-0.8-monoA-CP', 'NBC1-BzBz_PD34-0.6-dimer', ... 'NBC1-BzBz_PD34-1.7-dimer']
self.hrgt = None
#: dict of defined reaction subsets.
#: Note that self.sset['default'] contains all the nonredundant information.
#:
#: >>> print asdf.sset.keys()
#: ['meme', 'mxddpp', '5min', ... 'small']
self.sset = None
        # Removing hrxn, hrgt, etc. does not reduce the size of the object.
# These attributes are stored for ease of access for adding qc info, etc.
#: object of defined reaction subsets.
self.oss = None
# load database
if pythonpath is not None:
sys.path.insert(1, pythonpath)
else:
sys.path.append(os.path.dirname(__file__) + '/../databases')
database = psiutil.import_ignorecase(dbname)
if not database:
print('\nPython module for database %s failed to load\n\n' % (dbname))
print('\nSearch path that was tried:\n')
print(", ".join(map(str, sys.path)))
raise ValidationError("Python module loading problem for database " + str(dbname))
# gross validation of database
for item in ['dbse', 'GEOS', 'HRXN', 'ACTV', 'RXNM']:
try:
getattr(database, item)
except AttributeError:
raise ValidationError("""Database %s severely deformed with %s missing.""" % (database.__name__, item))
for item in ['TAGL', 'BIND']:
try:
getattr(database, item)
except AttributeError:
print("""Warning: Database %s possibly deformed with %s missing.\n""" % (database.__name__, item))
# form database name
self.dbse = database.dbse
try:
self.tagl = database.TAGL['dbse']
except KeyError:
print("""Warning: TAGL missing for database %s""" % (self.dbse))
# form array of database contents to process through
pieces = []
for item in dir(database):
if item in ['qcdb', 'rxn', 'dbse', 'TAGL']:
pass
elif item.startswith('__'):
pass
else:
pieces.append(item)
# form qcdb.Reagent objects from all defined geometries, GEOS
oHRGT = {}
for rgt, mol in database.GEOS.items():
mol.update_geometry()
try:
tagl = database.TAGL[rgt]
except KeyError:
tagl = None
print("""Warning: TAGL missing for reagent %s""" % (rgt))
oHRGT[rgt] = Reagent(name=rgt, mol=mol, tagl=tagl)
pieces.remove('GEOS')
self.hrgt = oHRGT
# form qcdb.Reaction objects from comprehensive reaction list, HRXN
oHRXN = OrderedDict()
for rxn in database.HRXN:
try:
tagl = database.TAGL[database.dbse + '-' + str(rxn)]
except KeyError:
tagl = None
print("""Warning: TAGL missing for reaction %s""" % (rxn))
try:
elst = database.DATA['SAPT ELST ENERGY'][database.dbse + '-' + str(rxn)]
disp = database.DATA['SAPT DISP ENERGY'][database.dbse + '-' + str(rxn)]
color = abs(elst) / (abs(elst) + abs(disp))
except (KeyError, AttributeError):
color = 'black'
print("""Warning: DATA['SAPT * ENERGY'] missing for reaction %s""" % (rxn))
oHRXN[rxn] = Reaction(name=rxn,
dbse=database.dbse,
indx=database.HRXN.index(rxn) + 1,
color=color,
tagl=tagl)
pieces.remove('HRXN')
self.hrxn = oHRXN
# list and align database stoichiometry modes, ACTV* and RXNM*
oACTV = {}
for modactv in [item for item in pieces if item.startswith('ACTV')]:
modrxnm = modactv.replace('ACTV', 'RXNM')
mode = 'default' if modactv == 'ACTV' else modactv.replace('ACTV_', '')
try:
getattr(database, modrxnm)
except AttributeError:
modrxnm = 'RXNM'
oACTV[mode] = [modactv, modrxnm]
for item in [tmp for tmp in pieces if tmp.startswith('ACTV') or tmp.startswith('RXNM')]:
pieces.remove(item)
# populate reaction matrices in qcdb.Reaction objects
for rxn in database.HRXN:
dbrxn = database.dbse + '-' + str(rxn)
for mode, actvrxnm in oACTV.items():
tdict = OrderedDict()
for rgt in getattr(database, actvrxnm[0])[dbrxn]:
tdict[oHRGT[rgt]] = getattr(database, actvrxnm[1])[dbrxn][rgt]
oHRXN[rxn].rxnm[mode] = tdict
# list embedded quantum chem info per rxn, incl. BIND*
arrsbind = [item for item in pieces if item.startswith('BIND_')]
if len(arrsbind) == 0:
if 'BIND' in pieces:
arrsbind = ['BIND']
else:
arrsbind = []
print("""Warning: No BIND array with reference values.""")
else:
for arrbind in arrsbind:
if getattr(database, arrbind) is database.BIND:
break
else:
print("""Warning: No BIND_* array assigned to be master BIND.""")
oBIND = {}
for arrbind in arrsbind:
ref = database.dbse + 'REF' if arrbind == 'BIND' else arrbind.replace('BIND_', '')
methods[ref] = Method(name=ref)
bases[ref] = BasisSet(name=ref)
try:
getattr(database, 'BINDINFO_' + ref)
except AttributeError:
arrbindinfo = None
print("""Warning: No BINDINFO dict with BIND attribution and modelchem for %s.""" % (ref))
else:
arrbindinfo = 'BINDINFO_' + ref
oBIND[ref] = [methods[ref], 'default', bases[ref], arrbind,
(getattr(database, arrbind) is database.BIND),
arrbindinfo]
for item in [tmp for tmp in pieces if tmp.startswith('BIND')]:
pieces.remove(item)
# populate data with reference values in qcdb.Reaction objects
for rxn in database.HRXN:
dbrxn = database.dbse + '-' + str(rxn)
for ref, info in oBIND.items():
bindval = getattr(database, info[3])[dbrxn]
if info[5] is None:
methodfeed = info[0]
modefeed = info[1]
basisfeed = info[2]
citationkey = 'anon'
else:
bindinforxn = getattr(database, info[5])[dbrxn]
methodfeed = methods[bindinforxn['method'].upper()] if 'method' in bindinforxn else info[0]
modefeed = bindinforxn['mode'] if 'mode' in bindinforxn else info[1]
basisfeed = bases[bindinforxn['basis'].lower()] if 'basis' in bindinforxn else info[2]
citationkey = bindinforxn['citation'].lower() if 'citation' in bindinforxn else 'anon'
citationfeed = pubs[citationkey]
if bindval is not None:
oHRXN[rxn].data[ref] = ReactionDatum(dbse=database.dbse, rxn=rxn,
method=methodfeed, mode=modefeed,
basis=basisfeed, citation=citationfeed,
value=bindval)
# oHRXN[rxn].data[ref] = ReactionDatum(dbse=database.dbse,
# rxn=rxn,
# method=info[0],
# mode=info[1],
# basis=info[2],
# value=bindval)
# #value=getattr(database, info[3])[dbrxn])
if info[4]:
oHRXN[rxn].benchmark = ref
# Process subsets
oSSET = {}
fsHRXN = frozenset(database.HRXN)
for sset in pieces:
if not sset.startswith('AXIS_'):
try:
fssset = frozenset(getattr(database, sset))
except TypeError:
continue
if fssset.issubset(fsHRXN):
oSSET[sset] = getattr(database, sset)
for item in oSSET.keys():
pieces.remove(item)
oSSET['HRXN'] = database.HRXN
self.sset = OrderedDict()
self.oss = OrderedDict() # just in case oss replaces sset someday
for item in oSSET.keys():
if item == 'HRXN_SM':
label = 'small'
elif item == 'HRXN_LG':
label = 'large'
elif item == 'HRXN_EQ':
label = 'equilibrium'
elif item == 'HRXN':
label = 'default'
elif item.startswith('HRXN_'):
label = item.replace('HRXN_', '').lower()
else:
label = item.lower()
# subsets may have different ordering from HRXN
self.sset[label] = OrderedDict()
for rxn in oSSET[item]:
self.sset[label][rxn] = oHRXN[rxn]
# initialize subset objects with light info
try:
sstagl = database.TAGL[item]
except KeyError:
try:
sstagl = database.TAGL[label]
except KeyError:
sstagl = None
print("""Warning: TAGL missing for subset %s""" % (label))
self.oss[label] = Subset(name=label,
hrxn=self.sset[label].keys(),
tagl=sstagl)
# Process axes
for axis in [item for item in pieces if item.startswith('AXIS_')]:
label = axis.replace('AXIS_', '')
try:
defn = getattr(database, axis)
except AttributeError:
raise ValidationError("""Axis %s not importable.""" % (label))
axisrxns = frozenset(defn.keys())
attached = False
for ss, rxns in self.sset.items():
if frozenset(rxns).issubset(axisrxns):
ordered_floats = []
for rx in self.oss[ss].hrxn:
ordered_floats.append(defn[rx])
self.oss[ss].axis[label] = ordered_floats
attached = True
if not attached:
print("""Warning: AXIS %s not affiliated with a subset""" % (label))
pieces.remove(axis)
print("""WrappedDatabase %s: Unparsed attributes""" % (self.dbse), pieces)
def __str__(self):
text = ''
text += """ ==> %s WrappedDatabase <==\n\n""" % (self.dbse)
text += """ Reagents: %s\n""" % (self.hrgt.keys())
text += """ Reactions: %s\n""" % (self.hrxn.keys())
text += """ Subsets: %s\n""" % (self.sset.keys())
text += """ Reference: %s\n""" % (self.benchmark())
text += """\n"""
return text
def add_ReactionDatum(self, dbse, rxn, method, mode, basis, value, units='kcal/mol', citation=None, comment=None,
overwrite=False):
"""Add a new quantum chemical value to *rxn* by creating a
qcdb.ReactionDatum from same arguments as that class's
object-less constructor. *rxn* may be actual Reaction.name
or Reaction.indx.
"""
if (self.dbse == dbse):
if rxn in self.hrxn:
rxnname = rxn # rxn is proper reaction name
else:
try:
if (rxn + 1 > 0) and (rxn == self.hrxn.items()[rxn - 1][1].indx):
rxnname = self.hrxn.items()[rxn - 1][1].name # rxn is reaction index (maybe dangerous?)
except (TypeError, IndexError):
raise ValidationError(
"""Inconsistent to add ReactionDatum for %s to database %s with reactions %s.""" %
(dbse + '-' + str(rxn), self.dbse, self.hrxn.keys()))
label = '-'.join([method, mode, basis])
if overwrite or (label not in self.hrxn[rxnname].data):
self.hrxn[rxnname].data[label] = ReactionDatum.library_modelchem(dbse=dbse, rxn=rxnname,
method=method, mode=mode, basis=basis,
value=value, units=units,
comment=comment, citation=citation)
else:
raise ValidationError("""ReactionDatum %s already present in Database.""" % (label))
else:
raise ValidationError("""Inconsistent to add ReactionDatum for %s to database %s.""" %
(dbse + '-' + str(rxn), self.dbse))
def add_Subset(self, name, func):
"""Define a new subset labeled *name* by providing a function
*func* that filters *self.hrxn*.
"""
sname = name.lower().split('\n')
label = sname.pop(0)
tagl = sname[0].strip() if sname else None
try:
filtered = func(self)
lsslist = [rxn for rxn in self.sset['default'].keys() if rxn in filtered]
except TypeError as e:
raise ValidationError("""Function %s did not return list: %s.""" % (func.__name__, str(e)))
if len(lsslist) == 0:
print("""WrappedDatabase %s: Subset %s NOT formed: empty""" % (self.dbse, label))
return
self.sset[label] = OrderedDict()
for rxn in lsslist:
self.sset[label][rxn] = self.hrxn[rxn]
self.oss[label] = Subset(name=label,
hrxn=self.sset[label].keys(),
tagl=tagl)
print("""WrappedDatabase %s: Subset %s formed: %d""" % (self.dbse, label, len(self.sset[label].keys())))
def compute_errors(self, modelchem, benchmark='default', sset='default', failoninc=True, verbose=False):
"""For full database or subset *sset*, computes raw reaction
errors between *modelchem* and *benchmark* model chemistries.
Returns error if model chemistries are missing for any reaction in
subset unless *failoninc* set to False, whereupon returns partial.
Returns dictionary of reaction labels and error forms.
"""
if isinstance(sset, basestring):
# sset is normal subset name 'MX' corresponding to HRXN_MX or MX array in database module
try:
lsset = self.sset[sset.lower()]
except KeyError as e:
# raise ValidationError("""Subset named %s not available""" % (str(e)))
lsset = OrderedDict()
else:
if callable(sset):
# sset is function that will generate subset of HRXN from sset(self)
lsslist = [rxn for rxn in self.sset['default'].keys() if rxn in sset(self)]
else:
# sset is array containing reactions
lsslist = [rxn for rxn in self.sset['default'].keys() if rxn in sset]
# assemble dict of qcdb.Reaction objects from array of reaction names
lsset = OrderedDict()
for rxn in lsslist:
lsset[rxn] = self.hrxn[rxn]
# cureinfo = self.get_pec_weightinfo()
err = {}
for rxn, oRxn in lsset.items():
lbench = oRxn.benchmark if benchmark == 'default' else benchmark
try:
mcLesser = oRxn.data[modelchem].value
except KeyError as e:
if failoninc:
raise ValidationError("""Reaction %s missing datum %s.""" % (str(rxn), str(e)))
else:
continue
try:
mcGreater = oRxn.data[lbench].value
except KeyError as e:
if lbench == 'ZEROS':
pass
else:
print("""Reaction %s missing benchmark""" % (str(rxn)))
continue
# handle particulars of PEC error measures
# rxncureinfo = cureinfo[rxn]
# try:
# mcGreaterCrvmin = self.hrxn[rxncureinfo['eq']].data[lbench].value
# except KeyError as e:
# print """Reaction %s missing benchmark""" % (str(eqrxn))
# cure_denom = cure_weight(refrxn=mcGreater, refeq=mcGreaterCrvmin, rrat=rxncureinfo['Rrat'])
# balanced_mask, balwt = balanced_error(refrxn=mcGreater, refeq=mcGreaterCrvmin, rrat=rxncureinfo['Rrat'])
if lbench == 'ZEROS':
err[rxn] = [mcLesser,
0.0, 0.0, 0.0, 1.0] # FAKE
else:
err[rxn] = [mcLesser - mcGreater,
(mcLesser - mcGreater) / abs(mcGreater),
(mcLesser - mcGreater) / abs(mcGreater), # FAKE
                            (mcLesser - mcGreater) / abs(mcGreater), # FAKE
1.0 # FAKE
]
# (mcLesser - mcGreater) / abs(cure_denom),
# (mcLesser - mcGreater) * balanced_mask / abs(mcGreaterCrvmin),
# balwt]
if verbose:
print("""p = %8.4f, pe = %8.3f%%, pbe = %8.3f%% pce = %8.3f%% reaction %s.""" %
(err[rxn][0], 100 * err[rxn][1], 100 * err[rxn][3], 100 * err[rxn][2], str(rxn)))
return err
def compute_statistics(self, modelchem, benchmark='default', sset='default',
failoninc=True, verbose=False, returnindiv=False):
"""For full database or subset *sset*, computes many error
statistics between single *modelchem* and *benchmark* model
chemistries. Returns error if model chemistries are missing
for any reaction in subset unless *failoninc* set to False,
whereupon returns partial statistics. Returns dictionary of
statistics labels and values.
"""
err = self.compute_errors(modelchem, benchmark=benchmark, sset=sset, failoninc=failoninc, verbose=verbose)
if len(err) == 0:
error = initialize_errors()
if verbose:
print("""Warning: nothing to compute.""")
else:
Nrxn = float(len(err))
error = OrderedDict()
# linear (absolute) error
linear = [val[0] for val in err.values()]
error['pexe'] = max(linear)
error['nexe'] = min(linear)
error['maxe'] = max(linear, key=lambda x: abs(x))
error['mine'] = min(linear, key=lambda x: abs(x))
error['me'] = sum(linear) / Nrxn
error['mae'] = sum(map(abs, linear)) / Nrxn
error['rmse'] = math.sqrt(sum(map(lambda x: x ** 2, linear)) / Nrxn)
error['stde'] = math.sqrt((sum(map(lambda x: x ** 2, linear)) - (sum(linear) ** 2) / Nrxn) / Nrxn)
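            # stde above is the population standard deviation, sqrt(E[x^2] - E[x]^2)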
# fractional (relative) error
relative = [val[1] for val in err.values()]
error['pexpe'] = max(relative)
error['nexpe'] = min(relative)
error['maxpe'] = max(relative, key=lambda x: abs(x))
error['minpe'] = min(relative, key=lambda x: abs(x))
error['mpe'] = sum(relative) / Nrxn
error['mape'] = sum(map(abs, relative)) / Nrxn
error['rmspe'] = math.sqrt(sum(map(lambda x: x ** 2, relative)) / Nrxn)
error['stdpe'] = math.sqrt((sum(map(lambda x: x ** 2, relative)) - (sum(relative) ** 2) / Nrxn) / Nrxn)
# balanced (relative) error
balanced = [val[3] for val in err.values()]
balwt = sum([val[4] for val in err.values()]) # get the wt fn. highly irregular TODO
error['pexpbe'] = max(balanced)
error['nexpbe'] = min(balanced)
error['maxpbe'] = max(balanced, key=lambda x: abs(x))
error['minpbe'] = min(balanced, key=lambda x: abs(x))
error['mpbe'] = sum(balanced) / balwt #Nrxn
error['mapbe'] = sum(map(abs, balanced)) / balwt #Nrxn
error['rmspbe'] = math.sqrt(sum(map(lambda x: x ** 2, balanced)) / balwt) #Nrxn)
error['stdpbe'] = None # get math domain errors w/wt in denom math.sqrt((sum(map(lambda x: x ** 2, balanced)) - (sum(balanced) ** 2) / balwt) / balwt) #/ Nrxn) / Nrxn)
# capped (relative) error
capped = [val[2] for val in err.values()]
error['pexpce'] = max(capped)
error['nexpce'] = min(capped)
error['maxpce'] = max(capped, key=lambda x: abs(x))
error['minpce'] = min(capped, key=lambda x: abs(x))
error['mpce'] = sum(capped) / Nrxn
error['mapce'] = sum(map(abs, capped)) / Nrxn
error['rmspce'] = math.sqrt(sum(map(lambda x: x ** 2, capped)) / Nrxn)
error['stdpce'] = math.sqrt((sum(map(lambda x: x ** 2, capped)) - (sum(capped) ** 2) / Nrxn) / Nrxn)
if verbose:
print("""%d systems in %s for %s vs. %s, subset %s.\n%s""" %
(len(err), self.dbse, modelchem, benchmark, sset, format_errors(error, mode=2)))
if returnindiv:
return error, err
else:
return error
def load_qcdata(self, modname, funcname, pythonpath=None, failoninc=True):
"""Loads qcdb.ReactionDatums from module *modname* function
*funcname*. Module search path can be prepended with *pythonpath*.
"""
if pythonpath is not None:
sys.path.insert(1, pythonpath)
else:
sys.path.append(os.path.dirname(__file__) + '/../data')
try:
datamodule = __import__(modname)
except ImportError:
if not failoninc:
print("""%s data unavailable for database %s.\n""" % (modname, self.dbse))
return
else:
print("""\nPython module for database data %s failed to load\n\n""" % (modname))
print("""\nSearch path that was tried:\n""")
print(', '.join(map(str, sys.path)))
raise ValidationError("""Python module loading problem for database data """ + str(modname))
try:
getattr(datamodule, funcname)(self)
except AttributeError:
if not failoninc:
print("""%s %s data unavailable for database %s.\n""" % (modname, funcname, self.dbse))
return
else:
raise ValidationError("Python module missing function %s for loading data " % (str(funcname)))
print("""WrappedDatabase %s: %s %s results loaded""" % (self.dbse, modname, funcname))
def load_qcdata_byproject(self, project, pythonpath=None):
"""Loads qcdb.ReactionDatums from standard location for *project*
:module dbse_project and function load_project. Module search path
can be prepended with *pythonpath*.
"""
mod = self.dbse + '_' + project
func = 'load_' + project
self.load_qcdata(modname=mod, funcname=func, pythonpath=pythonpath)
def load_qcdata_hrxn_byproject(self, project, path=None):
""""""
if path is None:
path = os.path.dirname(__file__) + '/../data'
pklfile = os.path.abspath(path) + os.sep + self.dbse + '_hrxn_' + project + '.pickle'
if not os.path.isfile(pklfile):
raise ValidationError(
"Reactions pickle file for loading database data from file %s does not exist" % (pklfile))
with open(pklfile, 'rb') as handle:
hrxns = pickle.load(handle)
# no error checking for speed
for rxn, data in hrxns.items():
self.hrxn[rxn].data.update(data)
def load_qcdata_hdf5_trusted(self, project, path=None):
"""Loads qcdb.ReactionDatums from HDF5 file at path/dbse_project.h5 .
If path not given, looks in qcdb/data. This file is written by
reap-DB and so has been largely validated.
"""
if path is None:
path = os.path.dirname(__file__) + '/../data'
hdf5file = os.path.abspath(path) + os.sep + self.dbse + '_' + project + '.h5'
if not os.path.isfile(hdf5file):
raise ValidationError("HDF5 file for loading database data from file %s does not exist" % (hdf5file))
try:
import pandas as pd
except ImportError:
raise ValidationError("Pandas data managment module must be available for import")
try:
next(self.hrxn.iterkeys()) + 1
except TypeError:
intrxn = False
else:
intrxn = True
with pd.get_store(hdf5file) as handle:
for mc in handle['pdie'].keys():
lmc = mc.split('-') # TODO could be done better
method = lmc[0]
bsse = '_'.join(lmc[1:-1])
basis = lmc[-1]
df = handle['pdie'][mc]
for dbrxn in df.index[df.notnull()].values:
[dbse, rxn] = dbrxn.split('-', 1)
if intrxn:
rxn = int(rxn)
self.hrxn[rxn].data[mc] = ReactionDatum.library_modelchem(dbse=dbse, rxn=rxn,
method=method, mode=bsse, basis=basis,
value=df[dbrxn])
def integer_reactions(self):
"""Returns boolean of whether reaction names need to be cast to integer"""
try:
next(self.hrxn.iterkeys()) + 1
except TypeError:
return False
else:
return True
@staticmethod
def load_pickled(dbname, path=None):
"""
"""
if path is None:
path = os.path.dirname(__file__) + '/../data'
picklefile = psiutil.findfile_ignorecase(dbname,
pre=os.path.abspath(path) + os.sep, post='_WDb.pickle')
if not picklefile:
raise ValidationError("Pickle file for loading database data from file %s does not exist" % (
os.path.abspath(path) + os.sep + dbname + '.pickle'))
# with open('/var/www/html/bfdb_devel/bfdb/scratch/ASDFlogfile.txt', 'a') as handle:
# handle.write('<!-- PICKLE %s\n' % (picklefile))
with open(picklefile, 'rb') as handle:
instance = pickle.load(handle)
return instance
def available_modelchems(self, union=True):
"""Returns all the labels of model chemistries that have been
loaded. Either all modelchems that have data for any reaction if
*union* is True or all modelchems that have data for all reactions
if *union* is False.
"""
mcs = [set(v.data) for v in self.hrxn.itervalues()]
if union:
return sorted(set.union(*mcs))
else:
return sorted(set.intersection(*mcs))
def benchmark(self):
"""Returns the model chemistry label for the database's benchmark."""
bm = None
rxns = self.hrxn.itervalues()
while bm is None:
try:
bm = next(rxns).benchmark
except StopIteration:
break
return bm
# return next(self.hrxn.itervalues()).benchmark
# TODO all rxns have same bench in db module so all have same here in obj
# but the way things stored in Reactions, this doesn't have to be so
def load_subsets(self, modname='subsetgenerator', pythonpath=None):
"""Loads subsets from all functions in module *modname*.
"""
if pythonpath is not None:
sys.path.insert(1, pythonpath)
else:
sys.path.append(os.path.dirname(__file__))
try:
ssmod = __import__(modname)
except ImportError:
print("""\nPython module for database data %s failed to load\n\n""" % (modname))
print("""\nSearch path that was tried:\n""")
print(', '.join(map(str, sys.path)))
raise ValidationError("Python module loading problem for database subset generator " + str(modname))
for func in dir(ssmod):
if callable(getattr(ssmod, func)):
self.add_Subset(getattr(ssmod, func).__doc__, getattr(ssmod, func))
print("""WrappedDatabase %s: Defined subsets loaded""" % (self.dbse))
def get_pec_weightinfo(self):
"""
"""
def closest(u, options):
return max(options, key=lambda v: len(os.path.commonprefix([u, v])))
dbdat = {}
oss = self.oss['default']
eqrxns = [rxn for rxn, rr in zip(oss.hrxn, oss.axis['Rrat']) if rr == 1.0]
for rxnix, rxn in enumerate(oss.hrxn):
dbdat[rxn] = {'eq': closest(rxn, eqrxns),
'Rrat': oss.axis['Rrat'][rxnix]}
return dbdat
# def table_simple1(self, mtd, bas, opt=['CP'], err=['mae'], benchmark='default', failoninc=True,
# plotpath='analysis/flats/flat_', theme='smmerge'):
# rowplan = ['bas', 'mtd']
# columnplan = [
# ['l', r"""Method \& Basis Set""", '', textables.label, {}],
# ['d', r'S22', 'HB', textables.val, {'sset': 'hb'}],
# ['d', r'S22', 'MX', textables.val, {'sset': 'mx'}],
# ['d', r'S22', 'DD', textables.val, {'sset': 'dd'}],
# ['d', r'S22', 'TT', textables.val, {'sset': 'default'}],
# ]
#
# def table_simple2(self, mtd, bas, opt=['CP'], err=['mae'], benchmark='default', failoninc=True,
# plotpath='analysis/flats/flat_', theme='smmerge'):
# rowplan = ['bas', 'mtd']
# columnplan = [
# ['l', r"""Method \& Basis Set""", '', textables.label, {}],
# ['d', r'MAE', 'HB', textables.val, {'sset': 'hb'}],
# ['d', r'MAE', 'MX', textables.val, {'sset': 'mx'}],
# ['d', r'MAE', 'DD', textables.val, {'sset': 'dd'}],
# ['d', r'MAE', 'TT', textables.val, {'sset': 'default'}],
# ['d', r'MA\%E', 'HB', textables.val, {'sset': 'hb', 'err': 'mape'}],
# ['d', r'MA\%E', 'MX', textables.val, {'sset': 'mx', 'err': 'mape'}],
# ['d', r'MA\%E', 'DD', textables.val, {'sset': 'dd', 'err': 'mape'}],
# ['d', r'MA\%E', 'TT', textables.val, {'sset': 'default', 'err': 'mape'}],
# ['d', r'maxE', 'TT ', textables.val, {'sset': 'default', 'err': 'maxe'}],
# ['d', r'min\%E', ' TT', textables.val, {'sset': 'default', 'err': 'minpe'}],
# ['d', r'rmsE', 'TT ', textables.val, {'sset': 'default', 'err': 'rmse'}],
# ['d', r'devE', ' TT', textables.val, {'sset': 'default', 'err': 'stde'}],
# ]
#
# def table_simple3(self, mtd, bas, opt=['CP'], err=['mae'], benchmark='default', failoninc=True,
# plotpath='analysis/flats/flat_', theme='smmerge'):
# rowplan = ['err', 'bas', 'mtd']
# columnplan = [
# ['l', r"""Method \& Basis Set""", '', textables.label, {}],
# ['d', r'MAE', 'HB', textables.val, {'sset': 'hb'}],
# ['d', r'MAE', 'MX', textables.val, {'sset': 'mx'}],
# ['d', r'MAE', 'DD', textables.val, {'sset': 'dd'}],
# ['d', r'MAE', 'TT', textables.val, {'sset': 'default'}],
# ]
#
# def table_simple4(self, mtd, bas, opt=['CP'], err=['mae'], benchmark='default', failoninc=True,
# plotpath='analysis/flats/flat_', theme='smmerge'):
# plotpath = 'autogen' # TODO handle better
# rowplan = ['bas', 'mtd']
# columnplan = [
# ['l', r"""Method \& Basis Set""", '', textables.label, {}],
# ['d', r'S22', 'HB', textables.val, {'sset': 'hb'}],
# ['d', r'S22', 'MX', textables.val, {'sset': 'mx'}],
# ['d', r'S22', 'DD', textables.val, {'sset': 'dd'}],
# ['d', r'S22', 'TT', textables.val, {'sset': 'default'}],
# # ['l', r"""Error Distribution\footnotemark[1]""", r"""\includegraphics[width=6.67cm,height=3.5mm]{%s%s.pdf}""" % (plotpath, 'blank'), textables.graphics, {}],
# ['l', r"""Error Distribution\footnotemark[1]""", r"""""", textables.graphics, {}],
# ]
class Database(object):
"""Collection for handling single or multiple qcdb.WrappedDatabase objects.
    In particular, it unifies modelchem and subset names when they are inconsistent
    across component databases, and it defines statistics across databases.
>>> asdf = qcdb.Database(['s22', 'Nbc10', 'hbc6', 'HSG'], 'DB4')
>>> qwer = qcdb.Database('s22')
"""
def __init__(self, dbnamelist, dbse=None, pythonpath=None, loadfrompickle=False, path=None):
#: internal name of database collection
#:
#: >>> print asdf.dbse
#: 'DB4'
self.dbse = None
#: ordered component Database objects
#:
#: >>> print asdf.dbdict
#: XXXX
self.dbdict = OrderedDict()
#: subset assembly pattern
#:
#: >>> print asdf.sset.keys()
#: XXXX
self.sset = OrderedDict()
#: assembly pattern for transspecies modelchems
#:
#: >>> print asdf.mcs.keys()
#: XXXX
self.mcs = {}
self.benchmark = None
# slight validation, repackaging into dbnamelist
if isinstance(dbnamelist, basestring):
dbnamelist = [dbnamelist]
elif all(isinstance(item, basestring) for item in dbnamelist):
pass
else:
raise ValidationError('Database::constructor: Inappropriate configuration of constructor arguments')
# load databases
for db in dbnamelist:
if loadfrompickle:
tmp = WrappedDatabase.load_pickled(db, path=path)
else:
tmp = WrappedDatabase(db, pythonpath=pythonpath)
self.dbdict[tmp.dbse] = tmp
# slurp up the obvious overlaps
consolidated_bench = [odb.benchmark() for odb in self.dbdict.values()]
if len(set(consolidated_bench)) == 1:
self.benchmark = consolidated_bench[0]
else:
self.benchmark = ''.join(consolidated_bench)
self.mcs[self.benchmark] = consolidated_bench
# methods[ref] = Method(name=ref)
# bases[ref] = BasisSet(name=ref)
self.mcs['default'] = consolidated_bench
# self.mcs['default'] = [odb.benchmark() for odb in self.dbdict.values()]
self._intersect_subsets()
self._intersect_modelchems()
# complex subsets
self.load_subsets()
# collection name
self.dbse = ''.join(self.dbdict.keys()) if dbse is None else dbse
# merge Reaction-s
self.hrxn = OrderedDict()
for db, odb in self.dbdict.items():
for rxn, orxn in odb.hrxn.items():
self.hrxn[orxn.dbrxn] = orxn
# merge Reagent-s
self.hrgt = OrderedDict()
for db, odb in self.dbdict.items():
for rgt, orgt in odb.hrgt.items():
self.hrgt[orgt.name] = orgt
print("""Database %s: %s""" % (self.dbse, ', '.join(self.dbdict.keys())))
def __str__(self):
text = ''
text += """ ===> %s Database <===\n\n""" % (self.dbse)
# text += """ Reagents: %s\n""" % (self.hrgt.keys())
# text += """ Reactions: %s\n""" % (self.hrxn.keys())
text += """ Subsets: %s\n""" % (self.sset.keys())
# text += """ Reference: %s\n""" % ('default: ' + ' + '.join(self.mcs['default']))
try:
text += """ Reference: %s\n""" % (self.benchmark + ': ' + ' + '.join(self.mcs[self.benchmark]))
except TypeError:
text += """ Reference: %s\n""" % ('UNDEFINED')
text += """ Model Chemistries: %s\n""" % (
', '.join(sorted([mc for mc in self.mcs.keys() if mc is not None])))
text += """\n"""
for db in self.dbdict.keys():
text += self.dbdict[db].__str__()
return text
# def benchmark(self):
# """Returns the model chemistry label for the database's benchmark."""
# return self.benchmark #TODO not sure if right way to go about this self.mcs['default']
def fancy_mcs(self, latex=False):
"""
"""
fmcs = {}
for mc in self.mcs.keys():
try:
mtd, mod, bas = mc.split('-')
except ValueError:
fmcs[mc] = mc
else:
if latex:
tmp = """%s/%s, %s""" % \
(methods[mtd].latex, bases[bas].latex, mod.replace('_', '\\_'))
fmcs[mc] = """%45s""" % (tmp)
else:
fmcs[mc] = """%20s / %-20s, %s""" % \
(methods[mtd].fullname, bases[bas].fullname, mod)
return fmcs
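# Hypothetical usage sketch (not part of the original source); 'asdf' is the
# collection built in the class docstring above:
#
#     >>> fmcs = asdf.fancy_mcs(latex=True)
#     >>> print(fmcs[asdf.benchmark])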
# def fancy_mcs_nested(self):
# """
# """
# fmcs = defaultdict(lambda: defaultdict(dict))
# for mc in self.mcs.keys():
# try:
# mtd, mod, bas = mc.split('-')
# except ValueError:
# fmcs['All']['All'][mc] = mc
# fmcs['Method']['Others'][mc] = mc
# fmcs['Options']['Others'][mc] = mc
# fmcs['Basis Treatment']['Others'][mc] = mc
# else:
# fancyrepr = """%20s / %-20s %s""" % (methods[mtd].latex, bases[bas].latex, mod)
# fmcs['All']['All'][mc] = fancyrepr
# fmcs['Method'][methods[mtd].latex][mc] = fancyrepr
# fmcs['Options'][mod][mc] = fancyrepr
# fmcs['Basis Treatment'][bases[bas].latex][mc] = fancyrepr
# return fmcs
def integer_reactions(self):
"""Returns boolean of whether reaction names need to be cast to integer"""
return {db: odb.integer_reactions() for db, odb in self.dbdict.items()}
def load_qcdata_byproject(self, project, pythonpath=None):
"""For each component database, loads qcdb.ReactionDatums from
the standard location for *project*: module dbse_project and its function
load_project. Module search path can be prepended with *pythonpath*.
"""
for db, odb in self.dbdict.items():
odb.load_qcdata_byproject(project, pythonpath=pythonpath)
self._intersect_modelchems()
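# Hypothetical usage sketch (not part of the original source); the project
# tag 'dft' is only an assumed example:
#
#     >>> asdf.load_qcdata_byproject('dft')
#     >>> sorted(asdf.mcs.keys())   # common modelchems now registered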
def load_qcdata_hdf5_trusted(self, project, path=None):
"""For each component database, loads qcdb.ReactionDatums from
HDF5 file at path/dbse_project.h5. If *path* not given, looks in
qcdb/data. This file is written by reap-DB and so has been largely
validated.
"""
for db, odb in self.dbdict.items():
odb.load_qcdata_hdf5_trusted(project, path=path)
self._intersect_modelchems()
def load_qcdata_hrxn_byproject(self, project, path=None):
for db, odb in self.dbdict.items():
odb.load_qcdata_hrxn_byproject(project, path=path)
self._intersect_modelchems()
def available_projects(self, path=None):
""""""
import glob
if path is None:
path = os.path.dirname(__file__) + '/../data'
projects = []
for pjfn in glob.glob(path + '/*_hrxn_*.pickle'):
pj = pjfn[:-7].split('_')[-1]
projects.append(pj)
complete_projects = []
for pj in set(projects):
if all([os.path.isfile(path + '/' + db + '_hrxn_' + pj + '.pickle') for db in self.dbdict.keys()]):
complete_projects.append(pj)
return complete_projects
def load_subsets(self, modname='subsetgenerator', pythonpath=None):
"""For each component database, loads subsets from all functions
in module *modname*. Default *modname* uses the standard generators.
"""
for db, odb in self.dbdict.items():
odb.load_subsets(modname=modname, pythonpath=pythonpath)
self._intersect_subsets()
def add_Subset(self, name, func):
"""Define a new subset labeled *name* by providing a database
*func* whose keys are the keys of dbdict and whose values are a
function that filters each WrappedDatabase's *self.hrxn*.
"""
label = name.lower()
merged = []
for db, odb in self.dbdict.items():
if callable(func[db]):
ssfunc = func[db]
else:
ssfunc = lambda x: func[db]
odb.add_Subset(name=name, func=ssfunc)
if name in odb.sset:
merged.append(name)
else:
merged.append(None)
if any(merged):
self.sset[label] = merged
print("""Database %s: Subset %s formed: %s""" % (self.dbse, label, self.sset[label]))
else:
print("""Database %s: Subset %s NOT formed: empty""" % (self.dbse, label))
def add_Subset_union(self, name, sslist):
"""
Define a new subset labeled *name* (note that there's nothing to
prevent overwriting an existing subset name) from the union of
existing named subsets in *sslist*.
"""
funcdb = {}
for db, odb in self.dbdict.items():
dbix = self.dbdict.keys().index(db)
overlapping_dbrxns = []
for ss in sslist:
lss = self.sset[ss][dbix]
if lss is not None:
overlapping_dbrxns.append(self.dbdict[db].sset[lss].keys())
rxnlist = set().union(*overlapping_dbrxns)
funcdb[db] = rxnlist
self.add_Subset(name, funcdb)
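# Hypothetical usage sketch (not part of the original source); assumes
# subsets named 'hb' and 'mx' are already defined for the collection:
#
#     >>> asdf.add_Subset_union('hbmx', ['hb', 'mx'])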
def add_sampled_Subset(self, sset='default', number_of_samples=1, sample_size=5, prefix='rand'):
"""Generate and register *number_of_samples* new subsets of size
*sample_size* and name built from *prefix*. Reactions chosen from *sset*.
"""
import random
intrxn = self.integer_reactions()
rxns = self.get_hrxn(sset=sset).keys()
def random_sample(ssname):
"""Generate and register a single new subset of size *sample_size* and
name *ssname*.
"""
sample = {db: [] for db in self.dbdict.keys()}
for dbrxn in random.sample(rxns, sample_size):
db, rxn = dbrxn.split('-', 1)
typed_rxn = int(rxn) if intrxn[db] else rxn
sample[db].append(typed_rxn)
self.add_Subset(ssname, sample)
for sidx in range(number_of_samples):
if number_of_samples == 1:
ssname = prefix
else:
ssname = prefix + '_' + str(sidx)
random_sample(ssname)
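# Hypothetical usage sketch (not part of the original source): register three
# random 10-reaction subsets named rand_0, rand_1, rand_2:
#
#     >>> asdf.add_sampled_Subset(sset='default', number_of_samples=3, sample_size=10, prefix='rand')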
def promote_Subset(self, name=None):
"""Examine component databases and elevate subset *name* not necessarily
present for all component databases to a subset for the *self*. When *name*
is None, promotes all subsets found for component databases. Also promotes
entirety of each component database as a subset with name of component
database dbse in lowercase.
"""
if name is None:
sss = [set(odb.sset.keys()) for db, odb in self.dbdict.items()]
new = sorted(set.union(*sss))
else:
new = [name]
for ss in new:
if ss not in self.sset:
self.sset[ss] = [ss if ss in odb.sset else None for db, odb in self.dbdict.items()]
print("""Database %s: Subset %s promoted: %s""" % (self.dbse, ss, self.sset[ss]))
if name is None and len(self.dbdict) > 1:
for db, odb in self.dbdict.items():
dbix = self.dbdict.keys().index(db)
ss = odb.dbse.lower()
if ss not in self.sset:
self.sset[ss] = ['default' if ix == dbix else None for ix in range(len(self.dbdict))]
print("""Database %s: Subset %s promoted: %s""" % (self.dbse, ss, self.sset[ss]))
def _intersect_subsets(self):
"""Examine component database subsets and collect common names as
Database subset.
"""
sss = [set(odb.sset.keys()) for db, odb in self.dbdict.items()]
new = sorted(set.intersection(*sss))
for ss in new:
self.sset[ss] = [ss] * len(self.dbdict.keys())
def _intersect_modelchems(self):
"""Examine component database qcdata and collect common names as
Database modelchem.
"""
mcs = [set(odb.available_modelchems()) for odb in self.dbdict.itervalues()]
new = sorted(set.intersection(*mcs))
for mc in new:
self.mcs[mc] = [mc] * len(self.dbdict.keys())
# def reaction_generator(self):
# """
# """
# for db, odb in self.dbdict.items():
# for rxn, orxn in odb.hrxn.items():
# yield orxn
def compute_statistics(self, modelchem, benchmark='default', sset='default',
failoninc=True, verbose=False, returnindiv=False):
"""Computes summary statistics and, if *returnindiv* True,
individual errors for single model chemistry *modelchem* versus
*benchmark* over subset *sset* over all component databases.
Particularly, imposes cross-database definitions for sset and
modelchem.
Raises an error if model chemistry data are missing for any reaction in
the subset unless *failoninc* is set to False, whereupon partial statistics
are returned. Returns a dictionary of statistics labels and values.
"""
errors = OrderedDict()
indiv = OrderedDict()
actvdb = []
for db, odb in self.dbdict.items():
dbix = self.dbdict.keys().index(db)
if self.sset[sset][dbix] is None:
errors[db], indiv[db] = (None, None)
else:
errors[db], indiv[db] = odb.compute_statistics(self.mcs[modelchem][dbix],
sset=self.sset[sset][dbix],
benchmark='ZEROS' if benchmark == 'ZEROS' else self.mcs[benchmark][dbix],
failoninc=failoninc, verbose=verbose, returnindiv=True)
actvdb.append(errors[db])
errors[self.dbse] = average_errors(*actvdb)
if returnindiv:
return errors, indiv
else:
return errors
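# Hypothetical usage sketch (not part of the original source); the modelchem
# key 'MP2-CP-adz' and subset 'hb' are assumed examples:
#
#     >>> errs = asdf.compute_statistics('MP2-CP-adz', benchmark='default', sset='hb')
#     >>> errs[asdf.dbse]['mae']   # cross-database mean absolute error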
def analyze_modelchems(self, modelchem, benchmark='default', failoninc=True, verbose=False):
"""For each component database, compute and print nicely formatted
summary error statistics for each model chemistry in array
*modelchem* versus *benchmark* for all available subsets.
"""
# compute errors
errors = {}
for mc in modelchem:
errors[mc] = {}
for ss in self.sset.keys():
errors[mc][ss] = self.compute_statistics(mc, benchmark=benchmark, sset=ss,
failoninc=failoninc, verbose=verbose, returnindiv=False)
# present errors
pre, suf, mid = string_contrast(modelchem)
text = """\n ==> %s %s[]%s Errors <==\n""" % (self.dbse, pre, suf)
text += """%20s %44s""" % ('', '==> ' + self.dbse + ' <==')
for db, odb in self.dbdict.items():
text += """%44s""" % ('=> ' + odb.dbse + ' <=')
text += '\n'
collabel = """ {:5} {:4} {:6} {:6} {:6}""".format(
'ME', 'STDE', 'MAE', 'MA%E', 'MA%BE')
text += """{:20} """.format('') + collabel
for db in self.dbdict.keys():
text += collabel
text += '\n'
text += """{:20} {}""".format('', '=' * 44)
ul = False
for db in self.dbdict.keys():
text += """{}""".format('_' * 44 if ul else ' ' * 44)
ul = not ul
text += '\n'
for ss in self.sset.keys():
text += """ => %s <=\n""" % (ss)
for mc in modelchem:
perr = errors[mc][ss]
text += """%20s %44s""" % (mid[modelchem.index(mc)],
format_errors(perr[self.dbse]))
for db in self.dbdict.keys():
text += """%44s""" % ('' if perr[db] is None else format_errors(perr[db]))
text += '\n'
print(text)
def plot_bars(self, modelchem, benchmark='default', sset=['default', 'hb', 'mx', 'dd'],
failoninc=True, verbose=False, view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Prepares 'grey bars' diagram for each model chemistry in array
*modelchem* versus *benchmark* over all component databases. A wide bar
is plotted with three smaller bars, corresponding to the 'mae'
summary statistic of the four subsets in *sset*.
*saveas* conveys directory ('/') and/or filename for saving the
resulting plot. File extension is not accessible, but *graphicsformat*
array requests among 'png', 'pdf', and 'eps' formats. *relpath*
forces paths to saved files to be relative to current directory,
rather than absolute paths for returned code and file dictionary.
Prepares bars diagram instructions and either executes them if
matplotlib available (Canopy or Anaconda) or prints them. Returns a
dictionary of all saved plot filenames.
>>> asdf.plot_bars(['MP2-CP-adz', 'MP2-CP-adtz'], sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'])
"""
# compute errors
errors = {}
for mc in modelchem:
if mc is not None:
errors[mc] = {}
for ss in sset:
errors[mc][ss] = self.compute_statistics(mc, benchmark=benchmark, sset=ss,
failoninc=failoninc, verbose=verbose, returnindiv=False)
# repackage
pre, suf, mid = string_contrast(modelchem)
dbdat = []
for mc in modelchem:
if mc is None:
dbdat.append(None)
else:
dbdat.append({'mc': mid[modelchem.index(mc)],
'data': [errors[mc][ss][self.dbse]['mae'] for ss in sset]})
title = self.dbse + ' ' + pre + '[]' + suf + ' ' + ','.join(sset)
# generate matplotlib instructions and call or print
try:
from . import mpl
import matplotlib.pyplot as plt
except ImportError:
# if not running from Canopy, print line to execute from Canopy
print("""filedict = mpl.bars(%s,\n title='%s'\n saveas=%s\n relpath=%s\n graphicsformat=%s)\n\n""" %
(dbdat, title, repr(saveas), repr(relpath), repr(graphicsformat)))
else:
# if running from Canopy, call mpl directly
filedict = mpl.bars(dbdat, title=title,
view=view,
saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
return filedict
# def get_pec_weightinfo(self):
# """
#
# """
# def closest(u, options):
# return max(options, key=lambda v: len(os.path.commonprefix([u, v])))
#
# dbdat = {}
# for db, odb in self.dbdict.items():
# #dbix = self.dbdict.keys().index(db)
# oss = odb.oss['default']
# eqrxns = [rxn for rxn, rr in zip(oss.hrxn, oss.axis['Rrat']) if rr == 1.0]
# for rxnix, rxn in enumerate(oss.hrxn):
# dbrxn = '-'.join([db, rxn])
# rrat = oss.axis['Rrat'][rxnix]
# eq = closest(rxn, eqrxns)
# print rxn, rxnix, eq, rrat, dbrxn
# dbdat[dbrxn] = {'eq': eq, 'Rrat': rrat}
# return dbdat
def plot_axis(self, axis, modelchem, benchmark='default', sset='default',
failoninc=True, verbose=False, color='sapt', view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""
"""
dbdatdict = OrderedDict()
for mc in modelchem:
# compute errors
errors, indiv = self.compute_statistics(mc, benchmark=benchmark, sset=sset,
failoninc=failoninc, verbose=verbose, returnindiv=True)
# repackage
dbdat = []
for db, odb in self.dbdict.items():
dbix = self.dbdict.keys().index(db)
oss = odb.oss[self.sset[sset][dbix]]
# TODO may need to make axis name distributable across wrappeddbs
# TODO not handling mc present bm absent
if indiv[db] is not None:
for rxn in oss.hrxn:
rxnix = oss.hrxn.index(rxn)
bm = self.mcs[benchmark][dbix]
bmpresent = False if (bm is None or bm not in odb.hrxn[rxn].data) else True
mcpresent = False if (self.mcs[mc][dbix] not in odb.hrxn[rxn].data) else True
entry = {'db': db,
'sys': str(rxn),
'color': odb.hrxn[rxn].color,
'axis': oss.axis[axis][rxnix]}
if bmpresent:
entry['bmdata'] = odb.hrxn[rxn].data[self.mcs[benchmark][dbix]].value
else:
entry['bmdata'] = None
if mcpresent:
entry['mcdata'] = odb.hrxn[rxn].data[self.mcs[mc][dbix]].value
else:
continue
if bmpresent and mcpresent:
entry['error'] = [indiv[db][rxn][0]]
else:
entry['error'] = [None]
dbdat.append(entry)
dbdatdict[fancify_mc_tag(mc).strip()] = dbdat
pre, suf, mid = string_contrast(modelchem)
title = """%s[%s]%s vs %s axis %s for %s subset %s""" % (pre, str(len(mid)), suf, benchmark, axis, self.dbse, sset)
print(title)
#for mc, dbdat in dbdatdict.items():
# print mc
# for d in dbdat:
# print '{:20s} {:8.2f} {:8.2f} {:8.2f}'.format(d['sys'], d['axis'],
# 0.0 if d['bmdata'] is None else d['bmdata'],
# 0.0 if d['mcdata'] is None else d['mcdata'])
# generate matplotlib instructions and call or print
try:
from . import mpl
import matplotlib.pyplot as plt
except ImportError:
# if not running from Canopy, print line to execute from Canopy
print("""filedict = mpl.valerr(%s,\n color='%s',\n title='%s',\n xtitle='%s',\n view=%s\n saveas=%s\n relpath=%s\n graphicsformat=%s)\n\n""" %
(dbdat, color, title, axis, view, repr(saveas), repr(relpath), repr(graphicsformat)))
else:
# if running from Canopy, call mpl directly
filedict = mpl.valerr(dbdatdict, color=color, title=title, xtitle=axis,
view=view,
saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
return filedict
def load_saptdata_frombfdb(self, sset='default',
pythonpath='/Users/loriab/linux/bfdb/sapt_punt', failoninc=True): # pythonpath=None
"""This is a stopgap function that loads sapt component data from
sapt_punt in bfdb repo.
"""
saptpackage = OrderedDict()
for db, odb in self.dbdict.items():
modname = 'sapt_' + odb.dbse
if pythonpath is not None:
sys.path.insert(1, pythonpath)
else:
sys.path.append(os.path.dirname(__file__) + '/../data')
try:
datamodule = __import__(modname)
except ImportError:
print("""\nPython module for database data %s failed to load\n\n""" % (modname))
print("""\nSearch path that was tried:\n""")
print(', '.join(map(str, sys.path)))
raise ValidationError("Python module loading problem for database subset generator " + str(modname))
try:
saptdata = getattr(datamodule, 'DATA')
except AttributeError:
raise ValidationError("SAPT punt module does not contain DATA" + str(modname))
saptmc = saptdata['SAPT MODELCHEM']
dbix = self.dbdict.keys().index(db)
for rxn, orxn in odb.hrxn.items():
lss = self.sset[sset][dbix]
if lss is not None:
if rxn in odb.sset[lss]:
dbrxn = orxn.dbrxn
try:
elst = saptdata['SAPT ELST ENERGY'][dbrxn]
exch = saptdata['SAPT EXCH ENERGY'][dbrxn]
ind = saptdata['SAPT IND ENERGY'][dbrxn]
disp = saptdata['SAPT DISP ENERGY'][dbrxn]
except (KeyError, AttributeError):
print("""Warning: DATA['SAPT * ENERGY'] missing for reaction %s""" % (dbrxn))
if failoninc:
break
else:
if not all([elst, ind, disp]): # exch sometimes physically zero
print("""Warning: DATA['SAPT * ENERGY'] missing piece for reaction %s: %s""" % (dbrxn, [elst, exch, ind, disp]))
if failoninc:
break
saptpackage[dbrxn] = {'mc': saptmc,
'elst': elst,
'exch': exch,
'ind': ind,
'disp': disp}
return saptpackage
def plot_ternary(self, sset='default', labeled=True,
pythonpath='/Users/loriab/linux/bfdb/sapt_punt', failoninc=True, # pythonpath=None
view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""This is a stopgap function that loads sapt component data from
sapt_punt in bfdb repo, then formats it to plot a ternary diagram.
"""
saptdata = self.load_saptdata_frombfdb(sset=sset, pythonpath=pythonpath,
failoninc=failoninc)
dbdat = []
mcs = []
for dat in saptdata.values():
dbdat.append([dat['elst'], dat['ind'], dat['disp']])
if dat['mc'] not in mcs:
mcs.append(dat['mc'])
title = ' '.join([self.dbse, sset, ' '.join(mcs)])
# generate matplotlib instructions and call or print
try:
from . import mpl
import matplotlib.pyplot as plt
except ImportError:
pass
# if not running from Canopy, print line to execute from Canopy
else:
# if running from Canopy, call mpl directly
filedict = mpl.ternary(dbdat, title=title, labeled=labeled,
view=view,
saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
return filedict
def plot_flat(self, modelchem, benchmark='default', sset='default',
failoninc=True, verbose=False, color='sapt', xlimit=4.0, xlines=[0.0, 0.3, 1.0],
view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Computes individual errors and summary statistics for single
model chemistry *modelchem* versus *benchmark* over
subset *sset* over all component databases. Thread *color* can be
'rgb' for old coloring, a color name or 'sapt' for spectrum coloring.
*saveas* conveys directory ('/') and/or filename for saving the
resulting plot. File extension is not accessible, but *graphicsformat*
array requests among 'png', 'pdf', and 'eps' formats. *relpath*
forces paths to saved files to be relative to current directory,
rather than absolute paths for returned code and file dictionary.
Prepares flat diagram instructions and either executes them if
matplotlib available (Canopy or Anaconda) or prints them. Returns a
dictionary of all saved plot filenames.
asdf.plot_flat('CCSD-CP-atqzadz', failoninc=False)
"""
# compute errors
mc = modelchem
errors, indiv = self.compute_statistics(mc, benchmark=benchmark, sset=sset,
failoninc=failoninc, verbose=verbose, returnindiv=True)
# repackage
dbdat = []
for db, odb in self.dbdict.items():
if indiv[db] is not None:
for rxn in indiv[db].keys():
dbdat.append({'db': db,
'sys': str(rxn),
'color': odb.hrxn[rxn].color,
'data': [indiv[db][rxn][0]]})
pre, suf, mid = string_contrast(mc)
title = self.dbse + '-' + sset + ' ' + pre + '[]' + suf
mae = errors[self.dbse]['mae']
mape = None
# mape = 100 * errors[self.dbse]['mape']
mapbe = None
# generate matplotlib instructions and call or print
try:
from . import mpl
import matplotlib.pyplot as plt
except ImportError:
# if not running from Canopy, print line to execute from Canopy
print("""filedict = mpl.flat(%s,\n color='%s',\n title='%s',\n mae=%s,\n mape=%s,\n xlimit=%s,\n xlines=%s,\n view=%s\n saveas=%s\n relpath=%s\n graphicsformat=%s)\n\n""" %
(dbdat, color, mc, mae, mape, xlimit, repr(xlines), view, repr(saveas), repr(relpath), repr(graphicsformat)))
else:
# if running from Canopy, call mpl directly
filedict = mpl.flat(dbdat, color=color, title=mc, mae=mae, mape=mape,
xlimit=xlimit, xlines=xlines, view=view,
saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
return filedict
def write_xyz_files(self, path=None):
"""Writes xyz files for every reagent in the Database to directory
in *path* or to directory dbse_xyzfiles that it createsin cwd if
*path* is None. Additionally, writes a script to that directory
that will generate transparent-background ray-traced png files for
every reagent with PyMol.
"""
if path is None:
xyzdir = os.getcwd() + os.sep + self.dbse + '_xyzfiles' + os.sep
else:
xyzdir = os.path.abspath(path) + os.sep
if not os.path.exists(xyzdir):
os.mkdir(xyzdir)
for rgt, orgt in self.hrgt.items():
omol = Molecule(orgt.mol)
omol.update_geometry()
omol.save_xyz(xyzdir + rgt + '.xyz')
with open(xyzdir + 'pymol_xyz2png_script.pml', 'w') as handle:
handle.write("""
# Launch PyMOL and run from its command line:
# PyMOL> cd {}
# PyMOL> @{}
""".format(xyzdir, 'pymol_xyz2png_script.pml'))
for rgt in self.hrgt.keys():
handle.write("""
load {xyzfile}
hide lines
show sticks
color grey, name c
cmd.set('''opaque_background''','''0''',quiet=0)
reset
orient
cmd.zoom(buffer=0.3, complete=1)
ray
png {pngfile}
reinitialize
""".format(
xyzfile=xyzdir + rgt + '.xyz',
pngfile=xyzdir + rgt + '.png'))
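# Hypothetical usage sketch (not part of the original source):
#
#     >>> asdf.write_xyz_files(path='xyzfiles')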
def plot_all_flats(self, modelchem=None, sset='default', xlimit=4.0,
failoninc=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Generate pieces for inclusion into tables. Supply list of
model chemistries to plot in *modelchem*; otherwise defaults to
all those available. Can modify subset *sset* and plotting
range *xlimit*.
>>> asdf.plot_all_flats(sset='tt-5min', xlimit=4.0)
"""
mcs = self.mcs.keys() if modelchem is None else modelchem
filedict = OrderedDict()
for mc in sorted(mcs):
minifiledict = self.plot_flat(mc, sset=sset, xlimit=xlimit, view=False,
failoninc=failoninc,
saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
filedict[mc] = minifiledict
return filedict
def get_hrxn(self, sset='default'):
"""
"""
rhrxn = OrderedDict()
for db, odb in self.dbdict.items():
dbix = self.dbdict.keys().index(db)
lss = self.sset[sset][dbix]
if lss is not None:
for rxn in odb.hrxn:
if rxn in odb.sset[lss]:
orxn = odb.hrxn[rxn]
rhrxn[orxn.dbrxn] = orxn # this is a change and conflict with vergil version
return rhrxn
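# Hypothetical usage sketch (not part of the original source); subset 'hb' is
# an assumed example and keys are dbrxn strings:
#
#     >>> rhrxn = asdf.get_hrxn(sset='hb')
#     >>> len(rhrxn)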
def get_hrgt(self, sset='default', actv='default'):
"""
"""
rhrxn = self.get_hrxn(sset=sset)
rhrgt = OrderedDict()
for rxn, orxn in rhrxn.items():
for orgt in orxn.rxnm[actv].keys():
rhrgt[orgt.name] = orgt
# TODO prob need to avoid duplicates or pass
return rhrgt
def get_reactions(self, modelchem, sset='default', benchmark='default',
failoninc=True):
"""Collects the reactions present in *sset* from each WrappedDatabase,
checks that *modelchem* and *benchmark* ReactionDatum are present
(fails if missing when *failoninc* is True), then returns in an array a tuple for
each reaction containing the modelchem key needed to access
*modelchem*, the modelchem key needed to access *benchmark*, and
the Reaction object.
"""
dbdat = []
rhrxn = self.get_hrxn(sset=sset)
for orxn in rhrxn.itervalues():
dbix = self.dbdict.keys().index(orxn.dbrxn.split('-')[0])
lmc = self.mcs[modelchem][dbix]
lbm = self.mcs[benchmark][dbix]
try:
orxn.data[lbm]
except KeyError as e:
# not sure if should treat bm differently
lbm = None
try:
orxn.data[lmc]
except KeyError as e:
if failoninc:
raise e
else:
lmc = None
dbdat.append((lmc, lbm, orxn))
# this is diff in that returning empties not just pass over- may break bfdb
# try:
# orxn.data[lmc]
# orxn.data[lbm]
# except KeyError as e:
# if failoninc:
# raise e
# else:
# # not sure yet if should return empties or just pass over
# pass
# else:
# dbdat.append((lmc, lbm, orxn))
return dbdat
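# Hypothetical usage sketch (not part of the original source); modelchem and
# subset names are assumed examples. Collect signed errors, tolerating
# missing data:
#
#     >>> for lmc, lbm, orxn in asdf.get_reactions('MP2-CP-adz', sset='hb', failoninc=False):
#     ...     if lmc and lbm:
#     ...         err = orxn.data[lmc].value - orxn.data[lbm].value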
def get_missing_reactions(self, modelchem, sset='default'):
"""Returns a dictionary (keys self.dbse and all component
WrappedDatabase.dbse) of two elements, the first being the number
of reactions *sset* should contain and the second being a list of
the reaction names (dbrxn) not available for *modelchem*. Absence
of benchmark not considered.
"""
counts = OrderedDict()
counts[self.dbse] = [0, []]
soledb = True if (len(self.dbdict) == 1 and self.dbdict.items()[0][0] == self.dbse) else False
if not soledb:
for db in self.dbdict.keys():
counts[db] = [0, []]
for (lmc, lbm, orxn) in self.get_reactions(modelchem, benchmark='default',
sset=sset, failoninc=False):
db, rxn = orxn.dbrxn.split('-', 1)
mcdatum = orxn.data[lmc].value if lmc else None
counts[self.dbse][0] += 1
if not soledb:
counts[db][0] += 1
if mcdatum is None:
counts[self.dbse][1].append(orxn.dbrxn)
if not soledb:
counts[db][1].append(orxn.dbrxn)
return counts
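# Hypothetical usage sketch (not part of the original source); 'MP2-CP-adz'
# is an assumed modelchem key:
#
#     >>> counts = asdf.get_missing_reactions('MP2-CP-adz', sset='default')
#     >>> ntarget, missing_dbrxns = counts[asdf.dbse]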
def plot_disthist(self, modelchem, benchmark='default', sset='default',
failoninc=True, verbose=False, xtitle='', view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Computes individual errors and summary statistics for single
model chemistry *modelchem* versus *benchmark* over
subset *sset* over all component databases. Computes a histogram
of the errors and a Gaussian distribution.
*saveas* conveys directory ('/') and/or filename for saving the
resulting plot. File extension is not accessible, but *graphicsformat*
array requests among 'png', 'pdf', and 'eps' formats. *relpath*
forces paths to saved files to be relative to current directory,
rather than absolute paths for returned code and file dictionary.
Prepares disthist diagram instructions and either executes them if
matplotlib available (Canopy or Anaconda) or prints them. Returns a
dictionary of all saved plot filenames.
>>>
"""
# compute errors
mc = modelchem
errors, indiv = self.compute_statistics(mc, benchmark=benchmark, sset=sset,
failoninc=failoninc, verbose=verbose, returnindiv=True)
# repackage
dbdat = []
for db in self.dbdict.keys():
if indiv[db] is not None:
for rxn in indiv[db].keys():
dbdat.append(indiv[db][rxn][0])
title = """%s vs %s for %s subset %s""" % (mc, benchmark, self.dbse, sset)
me = errors[self.dbse]['me']
stde = errors[self.dbse]['stde']
# generate matplotlib instructions and call or print
try:
from . import mpl
import matplotlib.pyplot as plt
except ImportError:
# if not running from Canopy, print line to execute from Canopy
print("""filedict = mpl.disthist(%s,\n title='%s',\n xtitle='%s'\n me=%s,\n stde=%s,\n saveas=%s,\n relpath=%s\n graphicsformat=%s)\n\n""" %
(dbdat, title, xtitle, me, stde, repr(saveas), repr(relpath), repr(graphicsformat)))
else:
# if running from Canopy, call mpl directly
filedict = mpl.disthist(dbdat, title=title, xtitle=xtitle, me=me, stde=stde,
view=view,
saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
return filedict
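# Hypothetical usage sketch (not part of the original source); modelchem and
# subset names are assumed examples:
#
#     >>> asdf.plot_disthist('MP2-CP-adz', sset='hb', xtitle='kcal/mol', view=False)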
def plot_modelchems(self, modelchem, benchmark='default', mbenchmark=None,
sset='default', msset=None, failoninc=True, verbose=False, color='sapt',
xlimit=4.0, labeled=True, view=True,
mousetext=None, mouselink=None, mouseimag=None, mousetitle=None, mousediv=None,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Computes individual errors and summary statistics over all component
databases for each model chemistry in array *modelchem* versus *benchmark*
over subset *sset*. *mbenchmark* and *msset* are array options (same
length as *modelchem*) that override *benchmark* and *sset*, respectively,
for non-uniform specification. Thread *color* can be 'rgb' for old
coloring, a color name or 'sapt' for spectrum coloring.
*saveas* conveys directory ('/') and/or filename for saving the
resulting plot. File extension is not accessible, but *graphicsformat*
array requests among 'png', 'pdf', and 'eps' formats. *relpath*
forces paths to saved files to be relative to current directory,
rather than absolute paths for returned code and file dictionary.
Prepares thread diagram instructions and either executes them if
matplotlib available (Canopy or Anaconda) or prints them. Returns a
dictionary of all saved plot filenames. If any of *mousetext*, *mouselink*,
or *mouseimag* is specified, htmlcode will be returned with an image map of
slats to any of text, link, or image, respectively.
"""
# distribute benchmark
if mbenchmark is None:
lbenchmark = [benchmark] * len(modelchem) # normal bm modelchem name
else:
if isinstance(mbenchmark, basestring) or len(mbenchmark) != len(modelchem):
raise ValidationError(
"""mbenchmark must be array of length distributable among modelchem""" % (str(mbenchmark)))
else:
lbenchmark = mbenchmark # array of bm for each modelchem
# distribute sset
if msset is None:
lsset = [sset] * len(modelchem) # normal ss name like 'MX'
else:
if isinstance(msset, basestring) or len(msset) != len(modelchem):
raise ValidationError("""msset must be array of length distributable among modelchem""" % (str(msset)))
else:
lsset = msset # array of ss for each modelchem
# compute errors
index = []
errors = {}
indiv = {}
for mc, bm, ss in zip(modelchem, lbenchmark, lsset):
ix = '%s_%s_%s' % (ss, mc, bm)
index.append(ix)
errors[ix], indiv[ix] = self.compute_statistics(mc, benchmark=bm, sset=ss,
failoninc=failoninc, verbose=verbose, returnindiv=True)
# repackage
dbdat = []
for db, odb in self.dbdict.items():
dbix = self.dbdict.keys().index(db)
for rxn in odb.hrxn:
data = []
for ix in index:
if indiv[ix][db] is not None:
if rxn in odb.sset[self.sset[lsset[index.index(ix)]][dbix]]:
try:
data.append(indiv[ix][db][rxn][0])
except KeyError as e:
if failoninc:
raise e
else:
data.append(None)
else:
data.append(None)
else:
data.append(None)
if not data or all(item is None for item in data):
pass # filter out empty reactions
else:
dbdat.append({'db': db,
'sys': str(rxn),
'show': str(rxn),
'color': odb.hrxn[rxn].color,
'data': data})
mae = [errors[ix][self.dbse]['mae'] for ix in index]
mape = [100 * errors[ix][self.dbse]['mape'] for ix in index]
# form unique filename
ixpre, ixsuf, ixmid = string_contrast(index)
title = self.dbse + ' ' + ixpre + '[]' + ixsuf
# generate matplotlib instructions and call or print
try:
from . import mpl
import matplotlib.pyplot as plt
except ImportError:
# if not running from Canopy, print line to execute from Canopy
print("""filedict, htmlcode = mpl.threads(%s,\n color='%s',\n title='%s',\n labels=%s,\n mae=%s,\n mape=%s\n xlimit=%s\n labeled=%s\n saveas=%s\n mousetext=%s\n mouselink=%s\n mouseimag=%s\n mousetitle=%s,\n mousediv=%s,\n relpath=%s\n graphicsformat=%s)\n\n""" %
(dbdat, color, title, ixmid, mae, mape, str(xlimit),
repr(labeled), repr(saveas), repr(mousetext), repr(mouselink), repr(mouseimag),
repr(mousetitle), repr(mousediv), repr(relpath), repr(graphicsformat)))
else:
# if running from Canopy, call mpl directly
filedict, htmlcode = mpl.threads(dbdat, color=color, title=title, labels=ixmid, mae=mae, mape=mape,
xlimit=xlimit, labeled=labeled, view=view,
mousetext=mousetext, mouselink=mouselink,
mouseimag=mouseimag, mousetitle=mousetitle, mousediv=mousediv,
saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
return filedict, htmlcode
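# Hypothetical usage sketch (not part of the original source); modelchem keys
# are assumed examples:
#
#     >>> filedict, htmlcode = asdf.plot_modelchems(['MP2-CP-adz', 'MP2-CP-atz'],
#     ...                                           sset='hb', failoninc=False, view=False)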
def plot_liliowa(self, modelchem, benchmark='default',
failoninc=True, xlimit=2.0, view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""
Note that it is not possible to access the sset of component databases. That is, for Database SSIBBI, the SSI-only subset arylaryl is accessible because it is not defined in BBI, but the SSI-only version of neutral is not accessible.
"""
# compute errors
mc = modelchem
errors = {}
for ss in self.sset.keys():
errors[ss] = self.compute_statistics(mc, benchmark=benchmark, sset=ss,
failoninc=failoninc, verbose=False, returnindiv=False)
# repackage
dbdat = []
ssarray = ['pospos', 'posneg', 'pospolar', 'posaliph', 'posaryl',
None, 'negneg', 'negpolar', 'negaliph', 'negaryl',
None, None, 'polarpolar', 'polaraliph', 'polararyl',
None, None, None, 'aliphaliph', 'alipharyl',
None, None, None, None, 'arylaryl']
for ss in ssarray:
dbdat.append(0.0 if ss is None else errors[ss][self.dbse]['mae'])
# generate matplotlib instructions and call or print
try:
from . import mpl
import matplotlib.pyplot as plt
except ImportError:
print('Matplotlib not avail')
else:
filedict = mpl.liliowa(dbdat, xlimit=xlimit, view=view,
saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
return filedict
def plot_iowa(self, modelchem, benchmark='default', sset='default',
failoninc=True, verbose=False,
title='', xtitle='', xlimit=2.0,
view=True,
saveas=None, relpath=False, graphicsformat=['pdf']):
"""Computes individual errors for single *modelchem* versus
*benchmark* over subset *sset*. Coloring green-to-purple with
maximum intensity at *xlimit*. Prepares Iowa plot instructions and
either executes them if matplotlib available (Canopy) or prints them.
"""
title = self.dbse + ' ' + modelchem
# compute errors
mc = modelchem
errors, indiv = self.compute_statistics(mc, benchmark=benchmark, sset=sset,
failoninc=failoninc, verbose=verbose, returnindiv=True)
# repackage
dbdat = []
dblbl = []
for db in self.dbdict.keys():
if indiv[db] is not None:
for rxn in indiv[db].keys():
dbdat.append(indiv[db][rxn][0])
dblbl.append(str(rxn))
title = """%s vs %s for %s subset %s""" % (mc, benchmark, self.dbse, sset)
me = errors[self.dbse]['me']
# generate matplotlib instructions and call or print
try:
from . import mpl
import matplotlib.pyplot as plt
except ImportError:
# if not running from Canopy, print line to execute from Canopy
print("""mpl.iowa(%s,\n %s,\n title='%s',\n xtitle='%s'\n xlimit=%s,\n saveas=%s,\n relpath=%s\n graphicsformat=%s)\n\n""" %
(dbdat, dblbl, title, xtitle, xlimit, repr(saveas), repr(relpath), repr(graphicsformat)))
else:
# if running from Canopy, call mpl directly
filedict = mpl.iowa(dbdat, dblbl, title=title, xtitle=xtitle, xlimit=xlimit,
view=view,
saveas=saveas, relpath=relpath, graphicsformat=graphicsformat)
return filedict
def export_pandas(self, modelchem=[], benchmark='default', sset='default', modelchemlabels=None,
failoninc=True):
"""
*modelchem* is an array of model chemistries; if it is empty, only the
benchmark is exported.
TODO: is benchmark needed?
"""
import pandas as pd
import numpy as np
if self.dbse not in ['ACONF', 'SCONF', 'PCONF', 'CYCONF']:
saptdata = self.load_saptdata_frombfdb(sset=sset, pythonpath='/Users/loriab/linux/bfdb/sapt_punt',
failoninc=failoninc)
listodicts = []
rhrxn = self.get_hrxn(sset=sset)
for dbrxn, orxn in rhrxn.items():
wdb = dbrxn.split('-')[0]
dbix = self.dbdict.keys().index(wdb)
wbm = self.mcs[benchmark][dbix]
wss = self.sset[sset][dbix]
woss = self.dbdict[wdb].oss[wss]
try:
Rrat = woss.axis['Rrat'][woss.hrxn.index(orxn.name)]
except KeyError:
Rrat = 1.0 # TODO generic soln?
dictorxn = {}
dictorxn['DB'] = wdb
dictorxn['System'] = orxn.tagl
dictorxn['Name'] = orxn.name
dictorxn['R'] = Rrat
dictorxn['System #'] = orxn.indx
dictorxn['Benchmark'] = np.NaN if orxn.benchmark is None else orxn.data[
wbm].value # this NaN exception is new and experimental
dictorxn['QcdbSys'] = orxn.dbrxn
if self.dbse not in ['ACONF', 'SCONF', 'PCONF', 'CYCONF']:
dictorxn['SAPT ELST ENERGY'] = saptdata[dbrxn]['elst']
dictorxn['SAPT EXCH ENERGY'] = saptdata[dbrxn]['exch']
dictorxn['SAPT IND ENERGY'] = saptdata[dbrxn]['ind']
dictorxn['SAPT DISP ENERGY'] = saptdata[dbrxn]['disp']
dictorxn['SAPT TOTAL ENERGY'] = dictorxn['SAPT ELST ENERGY'] + dictorxn['SAPT EXCH ENERGY'] + \
dictorxn['SAPT IND ENERGY'] + dictorxn['SAPT DISP ENERGY']
orgts = orxn.rxnm['default'].keys()
omolD = Molecule(orgts[0].mol) # TODO this is only going to work with Reaction ~= Reagent databases
npmolD = omolD.format_molecule_for_numpy()
omolA = Molecule(orgts[1].mol) # TODO this is only going to work with Reaction ~= Reagent databases
omolA.update_geometry()
dictorxn['MonA'] = omolA.natom()
# this whole member fn not well defined for db of varying stoichiometry
if self.dbse in ['ACONF', 'SCONF', 'PCONF', 'CYCONF']:
npmolD = omolD.format_molecule_for_numpy()
npmolA = omolA.format_molecule_for_numpy()
dictorxn['Geometry'] = np.vstack([npmolD, npmolA])
else:
dictorxn['Geometry'] = omolD.format_molecule_for_numpy()
# print '\nD', npmolD.shape[0], npmolA.shape[0], dictorxn['MonA'], npmolD, npmolA, dictorxn['Geometry']
for mc in modelchem:
try:
wmc = self.mcs[mc][dbix]
except KeyError:
# modelchem not in Database at all
print(mc, 'not found')
continue
key = mc if modelchemlabels is None else modelchemlabels[modelchem.index(mc)]
try:
dictorxn[key] = orxn.data[wmc].value
except KeyError as e:
# reaction not in modelchem
if failoninc:
raise ValidationError("""Reaction %s missing datum %s.""" % (key, str(e)))
else:
print(mc, str(e), 'not found')
continue
listodicts.append(dictorxn)
df = pd.DataFrame(listodicts)
pd.set_option('display.width', 500)
print(df.head(5))
print(df.tail(5))
return df
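# Hypothetical usage sketch (not part of the original source); assumes the
# modelchem key exists and the hard-coded SAPT punt path above is reachable:
#
#     >>> df = asdf.export_pandas(modelchem=['MP2-CP-adz'], failoninc=False)
#     >>> df[['DB', 'Name', 'Benchmark']].head()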
def table_reactions(self, modelchem, benchmark='default', sset='default',
failoninc=True,
columnplan=['indx', 'tagl', 'bm', 'mc', 'e', 'pe'],
title="""Reaction energies [kcal/mol] for {sset} $\subset$ {dbse} with {mc}""",
indextitle="""Detailed results for {sset} $\subset$ {dbse} with {mc}""",
plotpath='analysis/mols/',
standalone=True, theme='rxns', filename=None):
r"""Prepare single LaTeX table to *filename* or return lines if None showing
the per-reaction results for reactions in *sset* for single or array
or 'all' *modelchem*, where the last uses self.mcs(), model chemistries
versus *benchmark*. Use *failoninc* to toggle between command failing
or blank lines in table. Use *standalone* to toggle between full
compilable document and suitable for inclusion in another LaTeX document.
Use *columnplan* to customize column (from among columnreservoir, below)
layout. Use *title* and *indextitle* to customize table caption and
table-of-contents caption, respectively; variables in curly braces will
be substituted. Use *theme* to customize the \ref{tbl:} code.
"""
# define eligible columns for inclusion
columnreservoir = {
'dbrxn': ['l', r"""\textbf{Reaction}""", """{0:25s}"""],
'indx': ['r', '', """{0:14s}"""],
'tagl': ['l', r"""\textbf{Reaction}""", """{0:50s}"""],
'bm': ['d', r"""\multicolumn{1}{c}{\textbf{Benchmark}}""", """{0:8.2f}"""],
'mc': ['d', r"""\multicolumn{1}{c}{\textbf{ModelChem}}""", """{0:8.2f}"""],
'e': ['d', r"""\multicolumn{1}{c}{\textbf{Error}}""", """{0:8.2f}"""],
'pe': ['d', r"""\multicolumn{1}{c}{\textbf{\% Err.}}""", """{0:8.1f}"""],
'imag': ['l', '', r"""\includegraphics[width=1.0cm,height=3.5mm]{%s%%ss.png}""" % (plotpath)], # untested
}
for col in columnplan:
if col not in columnreservoir.keys():
raise ValidationError('Column {0} not recognized. Register with columnreservoir.'.format(col))
if isinstance(modelchem, basestring):
if modelchem.lower() == 'all':
mcs = sorted(self.mcs.keys())
else:
mcs = [modelchem]
else:
mcs = modelchem
# commence to generate LaTeX code
tablelines = []
indexlines = []
if standalone:
tablelines += textables.begin_latex_document()
# iterate to produce one LaTeX table per modelchem
for mc in mcs:
# prepare summary statistics
perr = self.compute_statistics(mc, benchmark=benchmark, sset=sset,
failoninc=failoninc, verbose=False,
returnindiv=False)
serrors = OrderedDict()
for db in self.dbdict.keys():
serrors[db] = None if perr[db] is None else format_errors(perr[db], mode=3)
serrors[self.dbse] = format_errors(perr[self.dbse], mode=3)
# prepare individual reactions and errors
terrors = OrderedDict()
isComplete = True
for (lmc, lbm, orxn) in self.get_reactions(mc, benchmark=benchmark,
sset=sset, failoninc=failoninc):
tmp = {}
dbrxn = orxn.dbrxn
tmp['dbrxn'] = dbrxn.replace('_', '\\_')
tmp['indx'] = r"""\textit{""" + str(orxn.indx) + """}"""
tmp['tagl'] = dbrxn.split('-')[0] + ' ' + \
(orxn.latex if orxn.latex else orxn.tagl.replace('_', '\\_'))
tmp['imag'] = None # name of primary rgt
bmdatum = orxn.data[lbm].value if lbm else None
mcdatum = orxn.data[lmc].value if lmc else None
tmp['bm'] = bmdatum
tmp['mc'] = mcdatum
if lmc and lbm:
tmp['e'] = mcdatum - bmdatum
tmp['pe'] = 100 * (mcdatum - bmdatum) / abs(bmdatum)
# TODO redefining errors not good practice
else:
isComplete = False
tmp['e'] = None
tmp['pe'] = None
terrors[dbrxn] = {}
for c in columnreservoir.keys():
terrors[dbrxn][c] = '' if tmp[c] is None else \
columnreservoir[c][2].format(tmp[c])
fancymodelchem = self.fancy_mcs(latex=True)[mc]
thistitle = title.format(dbse=self.dbse, mc=fancymodelchem,
sset='All' if sset == 'default' else sset.upper())
lref = [r"""tbl:qcdb"""]
if theme:
lref.append(theme)
lref.append(self.dbse)
if sset != 'default':
lref.append(sset)
lref.append(mc)
ref = '-'.join(lref)
# table intro
tablelines.append(r"""\begingroup""")
tablelines.append(r"""\squeezetable""")
tablelines.append(r"""\LTcapwidth=\textwidth""")
tablelines.append(r"""\begin{longtable}{%s}""" % (''.join([columnreservoir[col][0] for col in columnplan])))
tablelines.append(r"""\caption{%s""" % (thistitle))
tablelines.append(r"""\label{%s}} \\ """ % (ref))
tablelines.append(r"""\hline\hline""")
columntitles = [columnreservoir[col][1] for col in columnplan]
# initial header
tablelines.append(' & '.join(columntitles) + r""" \\ """)
tablelines.append(r"""\hline""")
tablelines.append(r"""\endfirsthead""")
# to be continued header
tablelines.append(r"""\multicolumn{%d}{@{}l}{\textit{\ldots continued} %s} \\ """ %
(len(columnplan), fancymodelchem))
tablelines.append(r"""\hline\hline""")
tablelines.append(' & '.join(columntitles) + r""" \\ """)
tablelines.append(r"""\hline""")
tablelines.append(r"""\endhead""")
# to be continued footer
tablelines.append(r"""\hline\hline""")
tablelines.append(r"""\multicolumn{%d}{r@{}}{\textit{continued \ldots}} \\ """ %
(len(columnplan)))
tablelines.append(r"""\endfoot""")
# final footer
tablelines.append(r"""\hline\hline""")
tablelines.append(r"""\endlastfoot""")
# table body
for dbrxn, stuff in terrors.items():
tablelines.append(' & '.join([stuff[col] for col in columnplan]) + r""" \\ """)
# table body summary
if any(col in ['e', 'pe'] for col in columnplan):
field_to_put_labels = [col for col in ['tagl', 'dbrxn', 'indx'] if col in columnplan]
if field_to_put_labels:
for block, blkerrors in serrors.items():
if blkerrors: # skip e.g., NBC block in HB of DB4
tablelines.append(r"""\hline""")
summlines = [[] for i in range(8)]
for col in columnplan:
if col == field_to_put_labels[0]:
summlines[0].append(
r"""\textbf{Summary Statistics: %s%s}%s""" % \
('' if sset == 'default' else sset + r""" $\subset$ """,
block,
'' if isComplete else r""", \textit{partial}"""))
summlines[1].append(r"""\textit{Minimal Signed Error} """)
summlines[2].append(r"""\textit{Minimal Absolute Error} """)
summlines[3].append(r"""\textit{Maximal Signed Error} """)
summlines[4].append(r"""\textit{Maximal Absolute Error} """)
summlines[5].append(r"""\textit{Mean Signed Error} """)
summlines[6].append(r"""\textit{Mean Absolute Error} """)
summlines[7].append(r"""\textit{Root-Mean-Square Error} """)
elif col in ['e', 'pe']:
summlines[0].append('')
summlines[1].append(blkerrors['nex' + col])
summlines[2].append(blkerrors['min' + col])
summlines[3].append(blkerrors['pex' + col])
summlines[4].append(blkerrors['max' + col])
summlines[5].append(blkerrors['m' + col])
summlines[6].append(blkerrors['ma' + col])
summlines[7].append(blkerrors['rms' + col])
else:
for ln in range(len(summlines)):
summlines[ln].append('')
for ln in range(len(summlines)):
tablelines.append(' & '.join(summlines[ln]) + r""" \\ """)
# table conclusion
tablelines.append(r"""\end{longtable}""")
tablelines.append(r"""\endgroup""")
tablelines.append(r"""\clearpage""")
tablelines.append('\n\n')
# form table index
thisindextitle = indextitle.format(dbse=self.dbse, mc=fancymodelchem.strip(),
sset='All' if sset == 'default' else sset.upper())
indexlines.append(r"""\scriptsize \ref{%s} & \scriptsize %s \\ """ % \
(ref, thisindextitle))
if standalone:
tablelines += textables.end_latex_document()
# form table and index return structures
if filename is None:
return tablelines, indexlines
else:
if filename.endswith('.tex'):
filename = filename[:-4]
with open(filename + '.tex', 'w') as handle:
handle.write('\n'.join(tablelines))
with open(filename + '_index.tex', 'w') as handle:
handle.write('\n'.join(indexlines) + '\n')
print("""\n LaTeX index written to {filename}_index.tex\n"""
""" LaTeX table written to {filename}.tex\n"""
""" >>> pdflatex {filename}\n"""
""" >>> open /Applications/Preview.app {filename}.pdf\n""".format(filename=filename))
filedict = {'data': os.path.abspath(filename) + '.tex',
'index': os.path.abspath(filename + '_index.tex')}
return filedict
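# Hypothetical usage sketch (not part of the original source); writes
# rxn_table.tex and rxn_table_index.tex for an assumed modelchem key:
#
#     >>> asdf.table_reactions('MP2-CP-adz', sset='hb', failoninc=False, filename='rxn_table')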
def table_wrapper(self, mtd, bas, tableplan, benchmark='default',
opt=['CP'], err=['mae'], sset=['default'], dbse=None,
opttarget=None,
failoninc=True,
xlimit=4.0, xlines=[0.0, 0.3, 1.0],
ialimit=2.0,
plotpath='autogen',
subjoin=True,
title=None, indextitle=None,
suppressblanks=False,
standalone=True, theme=None, filename=None):
"""Prepares dictionary of errors for all combinations of *mtd*, *opt*,
*bas* with respect to model chemistry *benchmark*, mindful of *failoninc*.
The general plan for the table, as well as defaults for landscape,
footnotes, *title*, *indextitle*, and *theme* are taken from function
*tableplan*. Once the error dictionary is ready, it and all other arguments
are passed along to textables.table_generic. Two arrays, one of table
lines and one of index lines are returned unless *filename* is given,
in which case they're written to file and a filedict returned.
"""
# get plan for table from *tableplan* and some default values
kwargs = {'plotpath': plotpath,
'subjoin': subjoin,
'xlines': xlines,
'xlimit': xlimit,
'ialimit': ialimit}
rowplan, columnplan, landscape, footnotes, \
suggestedtitle, suggestedtheme = tableplan(**kwargs)
#suggestedtitle, suggestedtheme = tableplan(plotpath=plotpath, subjoin=subjoin)
# make figure files write themselves
autothread = {}
autoliliowa = {}
if plotpath == 'autogen':
for col in columnplan:
if col[3].__name__ == 'flat':
if col[4] and autothread:
print('TODO: merge not handled')
elif col[4] or autothread:
autothread.update(col[4])
else:
autothread = {'dummy': True}
elif col[3].__name__ == 'liliowa':
autoliliowa = {'dummy': True}
# negotiate some defaults
dbse = [self.dbse] if dbse is None else dbse
theme = suggestedtheme if theme is None else theme
title = suggestedtitle if title is None else title
indextitle = title if indextitle is None else indextitle
opttarget = {'default': ['']} if opttarget is None else opttarget
def unify_options(orequired, opossible):
"""Perform a merge of options tags in *orequired* and *opossible* so
that the result is free of duplication and has the mode at the end.
"""
opt_combos = []
for oreq in orequired:
for opos in opossible:
pieces = sorted(set(oreq.split('_') + opos.split('_')))
if '' in pieces:
pieces.remove('')
for mode in ['CP', 'unCP', 'SA']:
if mode in pieces:
pieces.remove(mode)
pieces.append(mode)
pieces = '_'.join(pieces)
opt_combos.append(pieces)
return opt_combos
# gather list of model chemistries for table
mcs = ['-'.join(prod) for prod in itertools.product(mtd, opt, bas)]
mc_translator = {}
for m, o, b in itertools.product(mtd, opt, bas):
nominal_mc = '-'.join([m, o, b])
for oo in unify_options([o], opttarget['default']):
trial_mc = '-'.join([m, oo, b])
try:
perr = self.compute_statistics(trial_mc, benchmark=benchmark, sset='default', # prob. too restrictive by choosing subset
failoninc=False, verbose=False, returnindiv=False)
except KeyError as e:
continue
else:
mc_translator[nominal_mc] = trial_mc
break
else:
mc_translator[nominal_mc] = None
# compute errors
serrors = {}
for mc in mcs:
serrors[mc] = {}
for ss in self.sset.keys():
serrors[mc][ss] = {}
if mc_translator[mc] in self.mcs:
# Note: not handling when one component Wdb has one translated pattern and another another
perr = self.compute_statistics(mc_translator[mc], benchmark=benchmark, sset=ss,
failoninc=failoninc, verbose=False, returnindiv=False)
serrors[mc][ss][self.dbse] = format_errors(perr[self.dbse], mode=3)
if not failoninc:
mcsscounts = self.get_missing_reactions(mc_translator[mc], sset=ss)
serrors[mc][ss][self.dbse]['tgtcnt'] = mcsscounts[self.dbse][0]
serrors[mc][ss][self.dbse]['misscnt'] = len(mcsscounts[self.dbse][1])
if autothread:
if ('sset' in autothread and ss in autothread['sset']) or ('sset' not in autothread):
mcssplots = self.plot_flat(mc_translator[mc], benchmark=benchmark, sset=ss,
failoninc=failoninc, color='sapt', xlimit=xlimit, xlines=xlines, view=False,
saveas='flat_' + '-'.join([self.dbse, ss, mc]), relpath=True, graphicsformat=['pdf'])
serrors[mc][ss][self.dbse]['plotflat'] = mcssplots['pdf']
if autoliliowa and ss == 'default':
mcssplots = self.plot_liliowa(mc_translator[mc], benchmark=benchmark,
failoninc=failoninc, xlimit=ialimit, view=False,
saveas='liliowa_' + '-'.join([self.dbse, ss, mc]), relpath=True, graphicsformat=['pdf'])
serrors[mc][ss][self.dbse]['plotliliowa'] = mcssplots['pdf']
for db in self.dbdict.keys():
if perr[db] is None:
serrors[mc][ss][db] = None
else:
serrors[mc][ss][db] = format_errors(perr[db], mode=3)
if not failoninc:
serrors[mc][ss][db]['tgtcnt'] = mcsscounts[db][0]
serrors[mc][ss][db]['misscnt'] = len(mcsscounts[db][1])
else:
serrors[mc][ss][self.dbse] = format_errors(initialize_errors(), mode=3)
for db in self.dbdict.keys():
serrors[mc][ss][db] = format_errors(initialize_errors(), mode=3)
for key in serrors.keys():
print("""{:>35}{:>35}{}""".format(key, mc_translator[key], serrors[key]['default'][self.dbse]['mae']))
# find indices that would be neglected in a single sweep over table_generic
keysinplan = set(sum([col[-1].keys() for col in columnplan], rowplan))
obvious = {'dbse': dbse, 'sset': sset, 'mtd': mtd, 'opt': opt, 'bas': bas, 'err': err}
for key, vari in obvious.items():
if len(vari) == 1 or key in keysinplan:
del obvious[key]
iteroers = [(prod) for prod in itertools.product(*obvious.values())]
# commence to generate LaTeX code
tablelines = []
indexlines = []
if standalone:
tablelines += textables.begin_latex_document()
for io in iteroers:
actvargs = dict(zip(obvious.keys(), [[k] for k in io]))
nudbse = actvargs['dbse'] if 'dbse' in actvargs else dbse
nusset = actvargs['sset'] if 'sset' in actvargs else sset
numtd = actvargs['mtd'] if 'mtd' in actvargs else mtd
nuopt = actvargs['opt'] if 'opt' in actvargs else opt
nubas = actvargs['bas'] if 'bas' in actvargs else bas
nuerr = actvargs['err'] if 'err' in actvargs else err
table, index = textables.table_generic(
mtd=numtd, bas=nubas, opt=nuopt, err=nuerr, sset=nusset, dbse=nudbse,
rowplan=rowplan, columnplan=columnplan, serrors=serrors,
plotpath='' if plotpath == 'autogen' else plotpath,
subjoin=subjoin,
title=title, indextitle=indextitle,
suppressblanks=suppressblanks,
landscape=landscape, footnotes=footnotes,
standalone=False, theme=theme)
tablelines += table
tablelines.append('\n\n')
indexlines += index
if standalone:
tablelines += textables.end_latex_document()
# form table and index return structures
if filename is None:
return tablelines, indexlines
else:
if filename.endswith('.tex'):
filename = filename[:-4]
with open(filename + '.tex', 'w') as handle:
handle.write('\n'.join(tablelines))
with open(filename + '_index.tex', 'w') as handle:
handle.write('\n'.join(indexlines))
print("""\n LaTeX index written to {filename}_index.tex\n"""
""" LaTeX table written to {filename}.tex\n"""
""" >>> pdflatex {filename}\n"""
""" >>> open /Applications/Preview.app {filename}.pdf\n""".format(filename=filename))
filedict = {'data': os.path.abspath(filename) + '.tex',
'index': os.path.abspath(filename + '_index.tex')}
return filedict
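# Hypothetical usage sketch (not part of the original source); method/basis
# keys are assumed examples, and some_tableplan is a placeholder for a plan
# function that accepts the keyword arguments plotpath, subjoin, xlines,
# xlimit, and ialimit:
#
#     >>> asdf.table_wrapper(mtd=['MP2'], bas=['adz', 'atz'], opt=['CP'], err=['mae'],
#     ...                    tableplan=some_tableplan, filename='merge_abbr')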
def table_scrunch(self, plotpath, subjoin):
rowplan = ['mtd']
columnplan = [
['l', r'Method', '', textables.label, {}],
['c', r'Description', '', textables.empty, {}],
['d', r'aug-cc-pVDZ', 'unCP', textables.val, {'bas': 'adz', 'opt': 'unCP'}],
['d', r'aug-cc-pVDZ', 'CP', textables.val, {'bas': 'adz', 'opt': 'CP'}],
['d', r'aug-cc-pVTZ', 'unCP', textables.val, {'bas': 'atz', 'opt': 'unCP'}],
['d', r'aug-cc-pVTZ', 'CP', textables.val, {'bas': 'atz', 'opt': 'CP'}]]
footnotes = []
landscape = False
theme = 'summavg'
title = r"""Classification and Performance of model chemistries. Interaction energy [kcal/mol] {{err}} statistics.""".format()
return rowplan, columnplan, landscape, footnotes, title, theme
def table_merge_abbr(self, plotpath, subjoin):
"""Specialization of table_generic into table with minimal statistics
(three S22 and three overall) plus embedded slat diagram as suitable
for main paper. A single table is formed in sections by *bas* with
lines *mtd* within each section.
"""
rowplan = ['bas', 'mtd']
columnplan = [
['l', r"""Method \& Basis Set""", '', textables.label, {}],
['d', r'S22', 'HB', textables.val, {'sset': 'hb', 'dbse': 'S22'}],
['d', r'S22', 'MX/DD', textables.val, {'sset': 'mxdd', 'dbse': 'S22'}],
['d', r'S22', 'TT', textables.val, {'sset': 'tt', 'dbse': 'S22'}],
['d', r'Overall', 'HB', textables.val, {'sset': 'hb', 'dbse': 'DB4'}],
['d', r'Overall', 'MX/DD', textables.val, {'sset': 'mxdd', 'dbse': 'DB4'}],
['d', r'Overall', 'TT', textables.val, {'sset': 'tt', 'dbse': 'DB4'}],
['l', r"""Error Distribution\footnotemark[1]""",
r"""\includegraphics[width=6.67cm,height=3.5mm]{%s%s.pdf}""" % (plotpath, 'blank'),
textables.graphics, {}],
['d', r'Time', '', textables.empty, {}]]
# TODO Time column not right at all
footnotes = [fnreservoir['blankslat']]
landscape = False
theme = 'smmerge'
title = r"""Interaction energy [kcal/mol] {{err}} subset statistics with computed with {{opt}}{0}.""".format(
'' if subjoin else r""" and {bas}""")
return rowplan, columnplan, landscape, footnotes, title, theme
def table_merge_suppmat(self, plotpath, subjoin):
"""Specialization of table_generic into table with as many statistics
as will fit (mostly fullcurve and a few 5min) plus embedded slat
diagram as suitable for supplementary material. Multiple tables are
formed, one for each in *bas* with lines *mtd* within each table.
"""
rowplan = ['bas', 'mtd']
columnplan = [
['l', r"""Method \& Basis Set""", '', textables.label, {}],
['d', 'S22', 'HB', textables.val, {'sset': 'hb', 'dbse': 'S22'}],
['d', 'S22', 'MX', textables.val, {'sset': 'mx', 'dbse': 'S22'}],
['d', 'S22', 'DD', textables.val, {'sset': 'dd', 'dbse': 'S22'}],
['d', 'S22', 'TT', textables.val, {'sset': 'tt', 'dbse': 'S22'}],
['d', 'NBC10', 'MX', textables.val, {'sset': 'mx', 'dbse': 'NBC1'}],
['d', 'NBC10', 'DD', textables.val, {'sset': 'dd', 'dbse': 'NBC1'}],
['d', 'NBC10', 'TT', textables.val, {'sset': 'tt', 'dbse': 'NBC1'}],
['d', 'HBC6', 'HB/TT', textables.val, {'sset': 'tt', 'dbse': 'HBC1'}],
['d', 'HSG', 'HB', textables.val, {'sset': 'hb', 'dbse': 'HSG'}],
['d', 'HSG', 'MX', textables.val, {'sset': 'mx', 'dbse': 'HSG'}],
['d', 'HSG', 'DD', textables.val, {'sset': 'dd', 'dbse': 'HSG'}],
['d', 'HSG', 'TT', textables.val, {'sset': 'tt', 'dbse': 'HSG'}],
['d', 'Avg', 'TT ', textables.val, {'sset': 'tt', 'dbse': 'DB4'}],
['l', r"""Error Distribution\footnotemark[1]""",
r"""\includegraphics[width=6.67cm,height=3.5mm]{%s%s.pdf}""" % (plotpath, 'blank'),
textables.graphics, {}],
['d', 'NBC10', r"""TT\footnotemark[2]""", textables.val, {'sset': 'tt-5min', 'dbse': 'NBC1'}],
['d', 'HBC6', r"""TT\footnotemark[2] """, textables.val, {'sset': 'tt-5min', 'dbse': 'HBC1'}],
['d', 'Avg', r"""TT\footnotemark[2]""", textables.val, {'sset': 'tt-5min', 'dbse': 'DB4'}]]
footnotes = [fnreservoir['blankslat'], fnreservoir['5min']]
landscape = True
theme = 'lgmerge'
title = r"""Interaction energy [kcal/mol] {{err}} subset statistics with computed with {{opt}}{0}.""".format(
'' if subjoin else r""" and {bas}""")
return rowplan, columnplan, landscape, footnotes, title, theme
class DB4(Database):
def __init__(self, pythonpath=None, loadfrompickle=False, path=None):
"""Initialize FourDatabases object from SuperDatabase"""
Database.__init__(self, ['s22', 'nbc10', 'hbc6', 'hsg'], dbse='DB4',
pythonpath=pythonpath, loadfrompickle=loadfrompickle, path=path)
# # load up data and definitions
# self.load_qcdata_byproject('dft')
# self.load_qcdata_byproject('pt2')
# #self.load_qcdata_byproject('dhdft')
# self.load_subsets()
self.define_supersubsets()
self.define_supermodelchems()
def define_supersubsets(self):
"""
"""
self.sset['tt'] = ['default', 'default', 'default', 'default']
self.sset['hb'] = ['hb', None, 'default', 'hb']
self.sset['mx'] = ['mx', 'mx', None, 'mx']
self.sset['dd'] = ['dd', 'dd', None, 'dd']
self.sset['mxdd'] = ['mxdd', 'default', None, 'mxdd']
self.sset['pp'] = ['mxddpp', 'mxddpp', None, None]
self.sset['np'] = ['mxddnp', 'mxddnp', None, 'mxdd']
self.sset['tt-5min'] = ['default', '5min', '5min', 'default']
self.sset['hb-5min'] = ['hb', None, '5min', 'hb']
self.sset['mx-5min'] = ['mx', 'mx-5min', None, 'mx']
self.sset['dd-5min'] = ['dd', 'dd-5min', None, 'dd']
self.sset['mxdd-5min'] = ['mxdd', '5min', None, 'mxdd']
self.sset['pp-5min'] = ['mxddpp', 'mxddpp-5min', None, None]
self.sset['np-5min'] = ['mxddnp', 'mxddnp-5min', None, 'mxdd']
# def benchmark(self):
# """Returns the model chemistry label for the database's benchmark."""
# return 'C2001BENCH'
def define_supermodelchems(self):
"""
"""
self.benchmark = 'C2011BENCH'
self.mcs['C2010BENCH'] = ['S22A', 'NBC100', 'HBC60', 'HSG0']
self.mcs['C2011BENCH'] = ['S22B', 'NBC10A', 'HBC6A', 'HSGA']
self.mcs['CCSD-CP-adz'] = ['CCSD-CP-adz', 'CCSD-CP-hadz', 'CCSD-CP-adz', 'CCSD-CP-hadz']
self.mcs['CCSD-CP-atz'] = ['CCSD-CP-atz', 'CCSD-CP-hatz', 'CCSD-CP-atz', 'CCSD-CP-hatz']
self.mcs['CCSD-CP-adtz'] = ['CCSD-CP-adtz', 'CCSD-CP-hadtz', 'CCSD-CP-adtz', 'CCSD-CP-hadtz']
self.mcs['CCSD-CP-adtzadz'] = ['CCSD-CP-adtzadz', 'CCSD-CP-adtzhadz', 'CCSD-CP-adtzadz', 'CCSD-CP-adtzhadz']
self.mcs['CCSD-CP-atzadz'] = ['CCSD-CP-atzadz', 'CCSD-CP-atzhadz', 'CCSD-CP-atzadz', 'CCSD-CP-atzhadz']
self.mcs['CCSD-CP-atqzadz'] = ['CCSD-CP-atqzadz', 'CCSD-CP-atqzhadz', 'CCSD-CP-atqzadz', 'CCSD-CP-atqzhadz']
self.mcs['CCSD-CP-atzadtz'] = ['CCSD-CP-atzadtz', 'CCSD-CP-atzhadtz', 'CCSD-CP-atzadtz', 'CCSD-CP-atzhadtz']
self.mcs['CCSD-CP-atqzadtz'] = ['CCSD-CP-atqzadtz', 'CCSD-CP-atqzhadtz', 'CCSD-CP-atqzadtz',
'CCSD-CP-atqzhadtz']
self.mcs['CCSD-CP-atqzatz'] = ['CCSD-CP-atqzatz', 'CCSD-CP-atqzhatz', 'CCSD-CP-atqzatz', 'CCSD-CP-atqzhatz']
self.mcs['SCSCCSD-CP-adz'] = ['SCSCCSD-CP-adz', 'SCSCCSD-CP-hadz', 'SCSCCSD-CP-adz', 'SCSCCSD-CP-hadz']
self.mcs['SCSCCSD-CP-atz'] = ['SCSCCSD-CP-atz', 'SCSCCSD-CP-hatz', 'SCSCCSD-CP-atz', 'SCSCCSD-CP-hatz']
self.mcs['SCSCCSD-CP-adtz'] = ['SCSCCSD-CP-adtz', 'SCSCCSD-CP-hadtz', 'SCSCCSD-CP-adtz', 'SCSCCSD-CP-hadtz']
self.mcs['SCSCCSD-CP-adtzadz'] = ['SCSCCSD-CP-adtzadz', 'SCSCCSD-CP-adtzhadz', 'SCSCCSD-CP-adtzadz',
'SCSCCSD-CP-adtzhadz']
self.mcs['SCSCCSD-CP-atzadz'] = ['SCSCCSD-CP-atzadz', 'SCSCCSD-CP-atzhadz', 'SCSCCSD-CP-atzadz',
'SCSCCSD-CP-atzhadz']
self.mcs['SCSCCSD-CP-atqzadz'] = ['SCSCCSD-CP-atqzadz', 'SCSCCSD-CP-atqzhadz', 'SCSCCSD-CP-atqzadz',
'SCSCCSD-CP-atqzhadz']
self.mcs['SCSCCSD-CP-atzadtz'] = ['SCSCCSD-CP-atzadtz', 'SCSCCSD-CP-atzhadtz', 'SCSCCSD-CP-atzadtz',
'SCSCCSD-CP-atzhadtz']
self.mcs['SCSCCSD-CP-atqzadtz'] = ['SCSCCSD-CP-atqzadtz', 'SCSCCSD-CP-atqzhadtz', 'SCSCCSD-CP-atqzadtz',
'SCSCCSD-CP-atqzhadtz']
self.mcs['SCSCCSD-CP-atqzatz'] = ['SCSCCSD-CP-atqzatz', 'SCSCCSD-CP-atqzhatz', 'SCSCCSD-CP-atqzatz',
'SCSCCSD-CP-atqzhatz']
self.mcs['SCSMICCSD-CP-adz'] = ['SCSMICCSD-CP-adz', 'SCSMICCSD-CP-hadz', 'SCSMICCSD-CP-adz',
'SCSMICCSD-CP-hadz']
self.mcs['SCSMICCSD-CP-atz'] = ['SCSMICCSD-CP-atz', 'SCSMICCSD-CP-hatz', 'SCSMICCSD-CP-atz',
'SCSMICCSD-CP-hatz']
self.mcs['SCSMICCSD-CP-adtz'] = ['SCSMICCSD-CP-adtz', 'SCSMICCSD-CP-hadtz', 'SCSMICCSD-CP-adtz',
'SCSMICCSD-CP-hadtz']
self.mcs['SCSMICCSD-CP-adtzadz'] = ['SCSMICCSD-CP-adtzadz', 'SCSMICCSD-CP-adtzhadz', 'SCSMICCSD-CP-adtzadz',
'SCSMICCSD-CP-adtzhadz']
self.mcs['SCSMICCSD-CP-atzadz'] = ['SCSMICCSD-CP-atzadz', 'SCSMICCSD-CP-atzhadz', 'SCSMICCSD-CP-atzadz',
'SCSMICCSD-CP-atzhadz']
self.mcs['SCSMICCSD-CP-atqzadz'] = ['SCSMICCSD-CP-atqzadz', 'SCSMICCSD-CP-atqzhadz', 'SCSMICCSD-CP-atqzadz',
'SCSMICCSD-CP-atqzhadz']
self.mcs['SCSMICCSD-CP-atzadtz'] = ['SCSMICCSD-CP-atzadtz', 'SCSMICCSD-CP-atzhadtz', 'SCSMICCSD-CP-atzadtz',
'SCSMICCSD-CP-atzhadtz']
self.mcs['SCSMICCSD-CP-atqzadtz'] = ['SCSMICCSD-CP-atqzadtz', 'SCSMICCSD-CP-atqzhadtz', 'SCSMICCSD-CP-atqzadtz',
'SCSMICCSD-CP-atqzhadtz']
self.mcs['SCSMICCSD-CP-atqzatz'] = ['SCSMICCSD-CP-atqzatz', 'SCSMICCSD-CP-atqzhatz', 'SCSMICCSD-CP-atqzatz',
'SCSMICCSD-CP-atqzhatz']
self.mcs['CCSDT-CP-adz'] = ['CCSDT-CP-adz', 'CCSDT-CP-hadz', 'CCSDT-CP-adz', 'CCSDT-CP-hadz']
self.mcs['CCSDT-CP-atz'] = ['CCSDT-CP-atz', 'CCSDT-CP-hatz', 'CCSDT-CP-atz', 'CCSDT-CP-hatz']
self.mcs['CCSDT-CP-adtz'] = ['CCSDT-CP-adtz', 'CCSDT-CP-hadtz', 'CCSDT-CP-adtz', 'CCSDT-CP-hadtz']
self.mcs['CCSDT-CP-adtzadz'] = ['CCSDT-CP-adtzadz', 'CCSDT-CP-adtzhadz', 'CCSDT-CP-adtzadz',
'CCSDT-CP-adtzhadz']
self.mcs['CCSDT-CP-atzadz'] = ['CCSDT-CP-atzadz', 'CCSDT-CP-atzhadz', 'CCSDT-CP-atzadz', 'CCSDT-CP-atzhadz']
self.mcs['CCSDT-CP-atqzadz'] = ['CCSDT-CP-atqzadz', 'CCSDT-CP-atqzhadz', 'CCSDT-CP-atqzadz',
'CCSDT-CP-atqzhadz']
self.mcs['CCSDT-CP-atzadtz'] = ['CCSDT-CP-atzadtz', 'CCSDT-CP-atzhadtz', 'CCSDT-CP-atzadtz',
'CCSDT-CP-atzhadtz']
self.mcs['CCSDT-CP-atqzadtz'] = ['CCSDT-CP-atqzadtz', 'CCSDT-CP-atqzhadtz', 'CCSDT-CP-atqzadtz',
'CCSDT-CP-atqzhadtz']
self.mcs['CCSDT-CP-atqzatz'] = ['CCSDT-CP-atqzatz', 'CCSDT-CP-atqzhatz', 'CCSDT-CP-atqzatz',
'CCSDT-CP-atqzhatz']
# def make_pt2_flats(self):
# def plot_all_flats(self):
# """Generate pieces for inclusion into tables for PT2 paper.
# Note that DB4 flats use near-equilibrium subset.
#
# """
# Database.plot_all_flats(self, modelchem=None, sset='tt-5min', xlimit=4.0,
# graphicsformat=['pdf'])
def make_pt2_Figure_3(self):
"""Plot all the graphics needed for the calendar grey bars plot
in Fig. 3 of PT2.
        Note that in the modern implementation of class DB4, one would need to
        pass ``sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min']`` to get the
        published figure.
"""
# Fig. bars (a)
self.plot_bars(['MP2-CP-dz', 'MP2-CP-jadz', 'MP2-CP-hadz', 'MP2-CP-adz',
'MP2-CP-tz', 'MP2-CP-matz', 'MP2-CP-jatz', 'MP2-CP-hatz', 'MP2-CP-atz',
'MP2-CP-dtz', 'MP2-CP-jadtz', 'MP2-CP-hadtz', 'MP2-CP-adtz',
'MP2-CP-qz', 'MP2-CP-aaqz', 'MP2-CP-maqz', 'MP2-CP-jaqz', 'MP2-CP-haqz', 'MP2-CP-aqz',
'MP2-CP-tqz', 'MP2-CP-matqz', 'MP2-CP-jatqz', 'MP2-CP-hatqz', 'MP2-CP-atqz',
'MP2-CP-a5z', 'MP2-CP-aq5z'])
self.plot_bars(['SCSMP2-CP-dz', 'SCSMP2-CP-jadz', 'SCSMP2-CP-hadz', 'SCSMP2-CP-adz',
'SCSMP2-CP-tz', 'SCSMP2-CP-matz', 'SCSMP2-CP-jatz', 'SCSMP2-CP-hatz', 'SCSMP2-CP-atz',
'SCSMP2-CP-dtz', 'SCSMP2-CP-jadtz', 'SCSMP2-CP-hadtz', 'SCSMP2-CP-adtz',
'SCSMP2-CP-qz', 'SCSMP2-CP-aaqz', 'SCSMP2-CP-maqz', 'SCSMP2-CP-jaqz', 'SCSMP2-CP-haqz',
'SCSMP2-CP-aqz',
'SCSMP2-CP-tqz', 'SCSMP2-CP-matqz', 'SCSMP2-CP-jatqz', 'SCSMP2-CP-hatqz', 'SCSMP2-CP-atqz',
'SCSMP2-CP-a5z', 'SCSMP2-CP-aq5z'])
self.plot_bars(['SCSNMP2-CP-dz', 'SCSNMP2-CP-jadz', 'SCSNMP2-CP-hadz', 'SCSNMP2-CP-adz',
'SCSNMP2-CP-tz', 'SCSNMP2-CP-matz', 'SCSNMP2-CP-jatz', 'SCSNMP2-CP-hatz', 'SCSNMP2-CP-atz',
'SCSNMP2-CP-dtz', 'SCSNMP2-CP-jadtz', 'SCSNMP2-CP-hadtz', 'SCSNMP2-CP-adtz',
'SCSNMP2-CP-qz', 'SCSNMP2-CP-aaqz', 'SCSNMP2-CP-maqz', 'SCSNMP2-CP-jaqz', 'SCSNMP2-CP-haqz',
'SCSNMP2-CP-aqz',
'SCSNMP2-CP-tqz', 'SCSNMP2-CP-matqz', 'SCSNMP2-CP-jatqz', 'SCSNMP2-CP-hatqz', 'SCSNMP2-CP-atqz',
'SCSNMP2-CP-a5z', 'SCSNMP2-CP-aq5z'])
self.plot_bars([None, None, None, None,
'SCSMIMP2-CP-tz', 'SCSMIMP2-CP-matz', 'SCSMIMP2-CP-jatz', 'SCSMIMP2-CP-hatz', 'SCSMIMP2-CP-atz',
'SCSMIMP2-CP-dtz', 'SCSMIMP2-CP-jadtz', 'SCSMIMP2-CP-hadtz', 'SCSMIMP2-CP-adtz',
'SCSMIMP2-CP-qz', 'SCSMIMP2-CP-aaqz', 'SCSMIMP2-CP-maqz', 'SCSMIMP2-CP-jaqz',
'SCSMIMP2-CP-haqz', 'SCSMIMP2-CP-aqz',
'SCSMIMP2-CP-tqz', 'SCSMIMP2-CP-matqz', 'SCSMIMP2-CP-jatqz', 'SCSMIMP2-CP-hatqz',
'SCSMIMP2-CP-atqz',
None, None])
self.plot_bars(['DWMP2-CP-dz', 'DWMP2-CP-jadz', 'DWMP2-CP-hadz', 'DWMP2-CP-adz',
'DWMP2-CP-tz', 'DWMP2-CP-matz', 'DWMP2-CP-jatz', 'DWMP2-CP-hatz', 'DWMP2-CP-atz',
'DWMP2-CP-dtz', 'DWMP2-CP-jadtz', 'DWMP2-CP-hadtz', 'DWMP2-CP-adtz',
'DWMP2-CP-qz', 'DWMP2-CP-aaqz', 'DWMP2-CP-maqz', 'DWMP2-CP-jaqz', 'DWMP2-CP-haqz',
'DWMP2-CP-aqz',
'DWMP2-CP-tqz', 'DWMP2-CP-matqz', 'DWMP2-CP-jatqz', 'DWMP2-CP-hatqz', 'DWMP2-CP-atqz',
'DWMP2-CP-a5z', 'DWMP2-CP-aq5z'])
self.plot_bars(['MP2C-CP-dz', 'MP2C-CP-jadz', 'MP2C-CP-hadz', 'MP2C-CP-adz',
'MP2C-CP-tz', 'MP2C-CP-matz', 'MP2C-CP-jatz', 'MP2C-CP-hatz', 'MP2C-CP-atz',
'MP2C-CP-dtz', 'MP2C-CP-jadtz', 'MP2C-CP-hadtz', 'MP2C-CP-adtz',
None, None, None, None, None, 'MP2C-CP-aqz',
None, None, None, None, 'MP2C-CP-atqz',
None, None])
self.plot_bars(['MP2C-CP-atqzdz', 'MP2C-CP-atqzjadz', 'MP2C-CP-atqzhadz', 'MP2C-CP-atqzadz',
'MP2C-CP-atqztz', 'MP2C-CP-atqzmatz', 'MP2C-CP-atqzjatz', 'MP2C-CP-atqzhatz', 'MP2C-CP-atqzatz',
'MP2C-CP-atqzdtz', 'MP2C-CP-atqzjadtz', 'MP2C-CP-atqzhadtz', 'MP2C-CP-atqzadtz'])
# Fig. bars (c)
self.plot_bars(['MP2F12-CP-dz', 'MP2F12-CP-jadz', 'MP2F12-CP-hadz', 'MP2F12-CP-adz',
'MP2F12-CP-tz', 'MP2F12-CP-matz', 'MP2F12-CP-jatz', 'MP2F12-CP-hatz', 'MP2F12-CP-atz',
'MP2F12-CP-dtz', 'MP2F12-CP-jadtz', 'MP2F12-CP-hadtz', 'MP2F12-CP-adtz',
'MP2F12-CP-aqz', 'MP2F12-CP-atqz'])
self.plot_bars(['SCSMP2F12-CP-dz', 'SCSMP2F12-CP-jadz', 'SCSMP2F12-CP-hadz', 'SCSMP2F12-CP-adz',
'SCSMP2F12-CP-tz', 'SCSMP2F12-CP-matz', 'SCSMP2F12-CP-jatz', 'SCSMP2F12-CP-hatz',
'SCSMP2F12-CP-atz',
'SCSMP2F12-CP-dtz', 'SCSMP2F12-CP-jadtz', 'SCSMP2F12-CP-hadtz', 'SCSMP2F12-CP-adtz',
'SCSMP2F12-CP-aqz', 'SCSMP2F12-CP-atqz'])
self.plot_bars(['SCSNMP2F12-CP-dz', 'SCSNMP2F12-CP-jadz', 'SCSNMP2F12-CP-hadz', 'SCSNMP2F12-CP-adz',
'SCSNMP2F12-CP-tz', 'SCSNMP2F12-CP-matz', 'SCSNMP2F12-CP-jatz', 'SCSNMP2F12-CP-hatz',
'SCSNMP2F12-CP-atz',
                        'SCSNMP2F12-CP-dtz', 'SCSNMP2F12-CP-jadtz', 'SCSNMP2F12-CP-hadtz', 'SCSNMP2F12-CP-adtz',
'SCSNMP2F12-CP-aqz', 'SCSNMP2F12-CP-atqz'])
self.plot_bars([None, None, None, None,
'SCSMIMP2F12-CP-tz', 'SCSMIMP2F12-CP-matz', 'SCSMIMP2F12-CP-jatz', 'SCSMIMP2F12-CP-hatz',
'SCSMIMP2F12-CP-atz',
'SCSMIMP2F12-CP-dtz', 'SCSMIMP2F12-CP-jadtz', 'SCSMIMP2F12-CP-hadtz', 'SCSMIMP2F12-CP-adtz',
'SCSMIMP2F12-CP-aqz', 'SCSMIMP2F12-CP-atqz'])
self.plot_bars(['DWMP2F12-CP-dz', 'DWMP2F12-CP-jadz', 'DWMP2F12-CP-hadz', 'DWMP2F12-CP-adz',
'DWMP2F12-CP-tz', 'DWMP2F12-CP-matz', 'DWMP2F12-CP-jatz', 'DWMP2F12-CP-hatz', 'DWMP2F12-CP-atz',
'DWMP2F12-CP-dtz', 'DWMP2F12-CP-jadtz', 'DWMP2F12-CP-hadtz', 'DWMP2F12-CP-adtz',
'DWMP2F12-CP-aqz', 'DWMP2F12-CP-atqz'])
self.plot_bars(['MP2CF12-CP-dz', 'MP2CF12-CP-jadz', 'MP2CF12-CP-hadz', 'MP2CF12-CP-adz',
'MP2CF12-CP-tz', 'MP2CF12-CP-matz', 'MP2CF12-CP-jatz', 'MP2CF12-CP-hatz', 'MP2CF12-CP-atz',
'MP2CF12-CP-dtz', 'MP2CF12-CP-jadtz', 'MP2CF12-CP-hadtz', 'MP2CF12-CP-adtz',
'MP2CF12-CP-aqz', 'MP2CF12-CP-atqz'])
self.plot_bars(['MP2CF12-CP-atqzdz', 'MP2CF12-CP-atqzjadz', 'MP2CF12-CP-atqzhadz', 'MP2CF12-CP-atqzadz',
'MP2CF12-CP-atqztz', 'MP2CF12-CP-atqzmatz', 'MP2CF12-CP-atqzjatz', 'MP2CF12-CP-atqzhatz',
'MP2CF12-CP-atqzatz',
'MP2CF12-CP-atqzdtz', 'MP2CF12-CP-atqzjadtz', 'MP2CF12-CP-atqzhadtz', 'MP2CF12-CP-atqzadtz'])
def make_pt2_Figure_2(self):
"""Plot all the graphics needed for the diffuse augmented grey
bars plot in Fig. 2 of PT2.
        Note that in the modern implementation of class DB4, one would need to
        pass ``sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min']`` to get the
        published figure.
"""
# Fig. bars (a)
self.plot_bars(['MP2-CP-adz', 'MP2-CP-atz', 'MP2-CP-adtz',
'MP2-CP-aqz', 'MP2-CP-atqz', 'MP2-CP-a5z', 'MP2-CP-aq5z'])
self.plot_bars(['SCSMP2-CP-adz', 'SCSMP2-CP-atz',
'SCSMP2-CP-adtz', 'SCSMP2-CP-aqz', 'SCSMP2-CP-atqz',
'SCSMP2-CP-a5z', 'SCSMP2-CP-aq5z'])
self.plot_bars(['SCSNMP2-CP-adz', 'SCSNMP2-CP-atz',
'SCSNMP2-CP-adtz', 'SCSNMP2-CP-aqz', 'SCSNMP2-CP-atqz',
'SCSNMP2-CP-a5z', 'SCSNMP2-CP-aq5z'])
self.plot_bars(['SCSMIMP2-CP-atz', 'SCSMIMP2-CP-atz',
'SCSMIMP2-CP-adtz', 'SCSMIMP2-CP-aqz', 'SCSMIMP2-CP-atqz'])
self.plot_bars(['SCSMIMP2-CP-tz', 'SCSMIMP2-CP-tz',
'SCSMIMP2-CP-dtz', 'SCSMIMP2-CP-qz', 'SCSMIMP2-CP-tqz'])
self.plot_bars(['DWMP2-CP-adz', 'DWMP2-CP-atz', 'DWMP2-CP-adtz',
'DWMP2-CP-aqz', 'DWMP2-CP-atqz', 'DWMP2-CP-a5z', 'DWMP2-CP-aq5z'])
self.plot_bars(['MP2C-CP-adz', 'MP2C-CP-adtzadz',
'MP2C-CP-atqzadz', 'MP2C-CP-aq5zadz', 'MP2C-CP-atz',
'MP2C-CP-atqzatz', 'MP2C-CP-aq5zatz', 'MP2C-CP-adtz',
'MP2C-CP-atqzadtz', 'MP2C-CP-aqz', 'MP2C-CP-atqz'])
# Fig. bars (b)
self.plot_bars(['MP3-CP-adz', 'MP3-CP-adtzadz', 'MP3-CP-atqzadz',
'MP3-CP-atz', 'MP3-CP-atqzatz', 'MP3-CP-adtz', 'MP3-CP-atqzadtz'])
self.plot_bars(['MP25-CP-adz', 'MP25-CP-adtzadz', 'MP25-CP-atqzadz',
'MP25-CP-atz', 'MP25-CP-atqzatz', 'MP25-CP-adtz', 'MP25-CP-atqzadtz'])
self.plot_bars(['CCSD-CP-adz', 'CCSD-CP-adtzadz', 'CCSD-CP-atqzadz',
'CCSD-CP-atz', 'CCSD-CP-atqzatz', 'CCSD-CP-adtz', 'CCSD-CP-atqzadtz'])
self.plot_bars(['SCSCCSD-CP-adz', 'SCSCCSD-CP-adtzadz',
'SCSCCSD-CP-atqzadz', 'SCSCCSD-CP-atz', 'SCSCCSD-CP-atqzatz',
'SCSCCSD-CP-adtz', 'SCSCCSD-CP-atqzadtz'])
self.plot_bars(['SCSMICCSD-CP-adz', 'SCSMICCSD-CP-adtzadz',
'SCSMICCSD-CP-atqzadz', 'SCSMICCSD-CP-atz', 'SCSMICCSD-CP-atqzatz',
'SCSMICCSD-CP-adtz', 'SCSMICCSD-CP-atqzadtz'])
self.plot_bars(['CCSDT-CP-adz', 'CCSDT-CP-adtzadz',
'CCSDT-CP-atqzadz', 'CCSDT-CP-atz', 'CCSDT-CP-atqzatz',
'CCSDT-CP-adtz', 'CCSDT-CP-atqzadtz'])
# Fig. bars (c)
self.plot_bars(['MP2F12-CP-adz', 'MP2F12-CP-atz', 'MP2F12-CP-adtz',
'MP2F12-CP-aqz', 'MP2F12-CP-atqz'])
self.plot_bars(['SCSMP2F12-CP-adz', 'SCSMP2F12-CP-atz',
'SCSMP2F12-CP-adtz', 'SCSMP2F12-CP-aqz', 'SCSMP2F12-CP-atqz'])
self.plot_bars(['SCSNMP2F12-CP-adz', 'SCSNMP2F12-CP-atz',
'SCSNMP2F12-CP-adtz', 'SCSNMP2F12-CP-aqz',
'SCSNMP2F12-CP-atqz'])
self.plot_bars(['SCSMIMP2F12-CP-atz', 'SCSMIMP2F12-CP-atz',
'SCSMIMP2F12-CP-adtz', 'SCSMIMP2F12-CP-aqz',
'SCSMIMP2F12-CP-atqz'])
self.plot_bars(['SCSMIMP2F12-CP-tz', 'SCSMIMP2F12-CP-tz', 'SCSMIMP2F12-CP-dtz'])
self.plot_bars(['DWMP2F12-CP-adz', 'DWMP2F12-CP-atz',
'DWMP2F12-CP-adtz', 'DWMP2F12-CP-aqz', 'DWMP2F12-CP-atqz'])
self.plot_bars(['MP2CF12-CP-adz', 'MP2CF12-CP-adtzadz',
'MP2CF12-CP-atqzadz', 'MP2CF12-CP-atz', 'MP2CF12-CP-atqzatz',
'MP2CF12-CP-adtz', 'MP2CF12-CP-atqzadtz', 'MP2CF12-CP-aqz',
'MP2CF12-CP-atqz'])
# Fig. bars (d)
self.plot_bars(['CCSDAF12-CP-adz', 'CCSDAF12-CP-adtzadz', 'CCSDAF12-CP-atqzadz'])
self.plot_bars(['CCSDBF12-CP-adz', 'CCSDBF12-CP-adtzadz', 'CCSDBF12-CP-atqzadz'])
self.plot_bars(['SCSCCSDAF12-CP-adz', 'SCSCCSDAF12-CP-adtzadz', 'SCSCCSDAF12-CP-atqzadz'])
self.plot_bars(['SCSCCSDBF12-CP-adz', 'SCSCCSDBF12-CP-adtzadz', 'SCSCCSDBF12-CP-atqzadz'])
self.plot_bars(['SCMICCSDAF12-CP-adz', 'SCMICCSDAF12-CP-adtzadz', 'SCMICCSDAF12-CP-atqzadz'])
self.plot_bars(['SCMICCSDBF12-CP-adz', 'SCMICCSDBF12-CP-adtzadz', 'SCMICCSDBF12-CP-atqzadz'])
self.plot_bars(['CCSDTAF12-CP-adz', 'CCSDTAF12-CP-adtzadz', 'CCSDTAF12-CP-atqzadz'])
self.plot_bars(['CCSDTBF12-CP-adz', 'CCSDTBF12-CP-adtzadz', 'CCSDTBF12-CP-atqzadz'])
self.plot_bars(['DWCCSDTF12-CP-adz', 'DWCCSDTF12-CP-adtzadz', 'DWCCSDTF12-CP-atqzadz'])
def plot_dhdft_flats(self):
"""Generate pieces for grey bars figure for DH-DFT paper."""
self.plot_all_flats(
['B97D3-CP-adz', 'PBED3-CP-adz', 'M11L-CP-adz', 'DLDFD-CP-adz', 'B3LYPD3-CP-adz', 'PBE0D3-CP-adz',
'WB97XD-CP-adz', 'M052X-CP-adz', 'M062X-CP-adz', 'M08HX-CP-adz', 'M08SO-CP-adz', 'M11-CP-adz',
'VV10-CP-adz',
'LCVV10-CP-adz', 'WB97XV-CP-adz', 'PBE02-CP-adz', 'WB97X2-CP-adz', 'B2PLYPD3-CP-adz',
'DSDPBEP86D2OPT-CP-adz', 'MP2-CP-adz'], sset='tt-5min')
self.plot_all_flats(['B97D3-unCP-adz', 'PBED3-unCP-adz', 'M11L-unCP-adz', 'DLDFD-unCP-adz', 'B3LYPD3-unCP-adz',
'PBE0D3-unCP-adz',
'WB97XD-unCP-adz', 'M052X-unCP-adz', 'M062X-unCP-adz', 'M08HX-unCP-adz', 'M08SO-unCP-adz',
'M11-unCP-adz', 'VV10-unCP-adz',
'LCVV10-unCP-adz', 'WB97XV-unCP-adz', 'PBE02-unCP-adz', 'WB97X2-unCP-adz',
'B2PLYPD3-unCP-adz', 'DSDPBEP86D2OPT-unCP-adz', 'MP2-unCP-adz'], sset='tt-5min')
self.plot_all_flats(
['B97D3-CP-atz', 'PBED3-CP-atz', 'M11L-CP-atz', 'DLDFD-CP-atz', 'B3LYPD3-CP-atz', 'PBE0D3-CP-atz',
'WB97XD-CP-atz', 'M052X-CP-atz', 'M062X-CP-atz', 'M08HX-CP-atz', 'M08SO-CP-atz', 'M11-CP-atz',
'VV10-CP-atz',
'LCVV10-CP-atz', 'WB97XV-CP-atz', 'PBE02-CP-atz', 'WB97X2-CP-atz', 'B2PLYPD3-CP-atz',
'DSDPBEP86D2OPT-CP-atz', 'MP2-CP-atz'], sset='tt-5min')
self.plot_all_flats(['B97D3-unCP-atz', 'PBED3-unCP-atz', 'M11L-unCP-atz', 'DLDFD-unCP-atz', 'B3LYPD3-unCP-atz',
'PBE0D3-unCP-atz',
'WB97XD-unCP-atz', 'M052X-unCP-atz', 'M062X-unCP-atz', 'M08HX-unCP-atz', 'M08SO-unCP-atz',
'M11-unCP-atz', 'VV10-unCP-atz',
'LCVV10-unCP-atz', 'WB97XV-unCP-atz', 'PBE02-unCP-atz', 'WB97X2-unCP-atz',
'B2PLYPD3-unCP-atz', 'DSDPBEP86D2OPT-unCP-atz', 'MP2-unCP-atz'], sset='tt-5min')
def make_dhdft_Figure_1(self):
"""Plot all the graphics needed for the grey bars plot
in Fig. 1 of DHDFT.
"""
# Fig. bars (a)
self.plot_bars([
'M052X-unCP-adz', 'M052X-CP-adz', 'M052X-unCP-atz', 'M052X-CP-atz', None,
'M062X-unCP-adz', 'M062X-CP-adz', 'M062X-unCP-atz', 'M062X-CP-atz', None,
'M08SO-unCP-adz', 'M08SO-CP-adz', 'M08SO-unCP-atz', 'M08SO-CP-atz', None,
'M08HX-unCP-adz', 'M08HX-CP-adz', 'M08HX-unCP-atz', 'M08HX-CP-atz', None,
'M11-unCP-adz', 'M11-CP-adz', 'M11-unCP-atz', 'M11-CP-atz', None,
'M11L-unCP-adz', 'M11L-CP-adz', 'M11L-unCP-atz', 'M11L-CP-atz'],
sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'])
# Fig. bars (b)
self.plot_bars([
'PBED3-unCP-adz', 'PBED3-CP-adz', 'PBED3-unCP-atz', 'PBED3-CP-atz', None,
'B97D3-unCP-adz', 'B97D3-CP-adz', 'B97D3-unCP-atz', 'B97D3-CP-atz', None,
'PBE0D3-unCP-adz', 'PBE0D3-CP-adz', 'PBE0D3-unCP-atz', 'PBE0D3-CP-atz', None,
'B3LYPD3-unCP-adz', 'B3LYPD3-CP-adz', 'B3LYPD3-unCP-atz', 'B3LYPD3-CP-atz', None,
'DLDFD-unCP-adz', 'DLDFD-CP-adz', 'DLDFD-unCP-atz', 'DLDFD-CP-atz', None,
'WB97XD-unCP-adz', 'WB97XD-CP-adz', 'WB97XD-unCP-atz', 'WB97XD-CP-atz'],
sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'])
# Fig. bars (c)
self.plot_bars([
'VV10-unCP-adz', 'VV10-CP-adz', 'VV10-unCP-atz', 'VV10-CP-atz', None, None,
'LCVV10-unCP-adz', 'LCVV10-CP-adz', 'LCVV10-unCP-atz', 'LCVV10-CP-atz', None, None,
'WB97XV-unCP-adz', 'WB97XV-CP-adz', 'WB97XV-unCP-atz', 'WB97XV-CP-atz'],
sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'])
# Fig. bars (d)
self.plot_bars([
'PBE02-unCP-adz', 'PBE02-CP-adz', 'PBE02-unCP-atz', 'PBE02-CP-atz', None,
'WB97X2-unCP-adz', 'WB97X2-CP-adz', 'WB97X2-unCP-atz', 'WB97X2-CP-atz', None,
'B2PLYPD3-unCP-adz', 'B2PLYPD3-CP-adz', 'B2PLYPD3-unCP-atz', 'B2PLYPD3-CP-atz', None,
'DSDPBEP86D2OPT-unCP-adz', 'DSDPBEP86D2OPT-CP-adz', 'DSDPBEP86D2OPT-unCP-atz', 'DSDPBEP86D2OPT-CP-atz'],
sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'])
# Fig. bars (e)
self.plot_bars([
'MP2-unCP-adz', 'MP2-CP-adz', 'MP2-unCP-atz', 'MP2-CP-atz'],
sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'])
def make_dhdft_Figure_2(self):
"""Plot all the graphics needed for the SAPT/DFT/WFN
comparison plot in Fig. 2 of DHDFT.
        Note that the benchmark is stated as a reminder, not a necessity, since it is the default.
"""
self.plot_bars([
'SAPT0S-CP-jadz', 'SAPTDFT-CP-atz', 'SAPT2P-CP-adz', 'SAPT3M-CP-atz',
'SAPT2PCM-CP-atz', None, 'B97D3-unCP-atz', 'B3LYPD3-CP-adz',
'M052X-unCP-adz', 'WB97XD-CP-atz', 'WB97XV-CP-adz', 'WB97X2-CP-atz',
'DSDPBEP86D2OPT-CP-atz', 'B2PLYPD3-CP-atz', None, 'MP2-CP-atz',
'SCSMP2-CP-atz', 'SCSMIMP2-CP-qz', 'MP2C-CP-atqzadz',
'MP2CF12-CP-adz', 'SCMICCSDAF12-CP-adz', 'CCSDT-CP-atz',
'CCSDT-CP-atqzatz', 'DWCCSDTF12-CP-adz'],
sset=['tt-5min', 'hb-5min', 'mx-5min', 'dd-5min'],
benchmark='C2011BENCH')
def plot_dhdft_modelchems(self):
self.plot_modelchems(
['B97D3-CP-adz', 'PBED3-CP-adz', 'M11L-CP-adz', 'DLDFD-CP-adz', 'B3LYPD3-CP-adz', 'PBE0D3-CP-adz',
'WB97XD-CP-adz', 'M052X-CP-adz', 'M062X-CP-adz', 'M08HX-CP-adz', 'M08SO-CP-adz', 'M11-CP-adz',
'VV10-CP-adz',
'LCVV10-CP-adz', 'WB97XV-CP-adz', 'PBE02-CP-adz', 'WB97X2-CP-adz', 'B2PLYPD3-CP-adz',
'DSDPBEP86D2OPT-CP-adz', 'MP2-CP-adz'], sset='tt-5min')
self.plot_modelchems(['B97D3-unCP-adz', 'PBED3-unCP-adz', 'M11L-unCP-adz', 'DLDFD-unCP-adz', 'B3LYPD3-unCP-adz',
'PBE0D3-unCP-adz',
'WB97XD-unCP-adz', 'M052X-unCP-adz', 'M062X-unCP-adz', 'M08HX-unCP-adz', 'M08SO-unCP-adz',
'M11-unCP-adz', 'VV10-unCP-adz',
'LCVV10-unCP-adz', 'WB97XV-unCP-adz', 'PBE02-unCP-adz', 'WB97X2-unCP-adz',
'B2PLYPD3-unCP-adz', 'DSDPBEP86D2OPT-unCP-adz', 'MP2-unCP-adz'], sset='tt-5min')
self.plot_modelchems(
['B97D3-CP-atz', 'PBED3-CP-atz', 'M11L-CP-atz', 'DLDFD-CP-atz', 'B3LYPD3-CP-atz', 'PBE0D3-CP-atz',
'WB97XD-CP-atz', 'M052X-CP-atz', 'M062X-CP-atz', 'M08HX-CP-atz', 'M08SO-CP-atz', 'M11-CP-atz',
'VV10-CP-atz',
'LCVV10-CP-atz', 'WB97XV-CP-atz', 'PBE02-CP-atz', 'WB97X2-CP-atz', 'B2PLYPD3-CP-atz',
'DSDPBEP86D2OPT-CP-atz', 'MP2-CP-atz'], sset='tt-5min')
self.plot_modelchems(['B97D3-unCP-atz', 'PBED3-unCP-atz', 'M11L-unCP-atz', 'DLDFD-unCP-atz', 'B3LYPD3-unCP-atz',
'PBE0D3-unCP-atz',
'WB97XD-unCP-atz', 'M052X-unCP-atz', 'M062X-unCP-atz', 'M08HX-unCP-atz', 'M08SO-unCP-atz',
'M11-unCP-atz', 'VV10-unCP-atz',
'LCVV10-unCP-atz', 'WB97XV-unCP-atz', 'PBE02-unCP-atz', 'WB97X2-unCP-atz',
'B2PLYPD3-unCP-atz', 'DSDPBEP86D2OPT-unCP-atz', 'MP2-unCP-atz'], sset='tt-5min')
def plot_minn_modelchems(self):
self.plot_modelchems(
['DLDFD-unCP-adz', 'M052X-unCP-adz', 'M062X-unCP-adz', 'M08HX-unCP-adz', 'M08SO-unCP-adz', 'M11-unCP-adz',
'M11L-unCP-adz',
'DLDFD-CP-adz', 'M052X-CP-adz', 'M062X-CP-adz', 'M08HX-CP-adz', 'M08SO-CP-adz', 'M11-CP-adz',
'M11L-CP-adz'])
self.plot_modelchems(
            ['DLDFD-unCP-atz', 'M052X-unCP-atz', 'M062X-unCP-atz', 'M08HX-unCP-atz', 'M08SO-unCP-atz', 'M11-unCP-atz',
'M11L-unCP-atz',
'DLDFD-CP-atz', 'M052X-CP-atz', 'M062X-CP-atz', 'M08HX-CP-atz', 'M08SO-CP-atz', 'M11-CP-atz',
'M11L-CP-atz'])
def make_dhdft_Table_I(self):
"""Generate the in-manuscript summary slat table for DHDFT.
"""
self.table_wrapper(mtd=['B97D3', 'PBED3', 'M11L', 'DLDFD', 'B3LYPD3',
'PBE0D3', 'WB97XD', 'M052X', 'M062X', 'M08HX',
'M08SO', 'M11', 'VV10', 'LCVV10', 'WB97XV',
'PBE02', 'WB97X2', 'DSDPBEP86D2OPT', 'B2PLYPD3',
'MP2', 'SCSNMP2', 'SCSMIMP2', 'MP2CF12', 'SCMICCSDAF12',
'SAPTDFT', 'SAPT0S', 'SAPT2P', 'SAPT3M', 'SAPT2PCM'],
bas=['adz', 'atz'],
tableplan=self.table_scrunch,
opt=['CP', 'unCP'], err=['mae'],
subjoin=None,
plotpath=None,
standalone=False, filename='tblssets_ex1')
def make_dhdft_Table_II(self):
"""Generate the in-manuscript CP slat table for DHDFT.
"""
self.table_wrapper(mtd=['B97D3', 'PBED3', 'M11L', 'DLDFD', 'B3LYPD3',
'PBE0D3', 'WB97XD', 'M052X', 'M062X', 'M08HX',
'M08SO', 'M11', 'VV10', 'LCVV10', 'WB97XV',
'PBE02', 'WB97X2', 'DSDPBEP86D2OPT', 'B2PLYPD3', 'MP2'],
bas=['adz', 'atz'],
tableplan=self.table_merge_abbr,
opt=['CP'], err=['mae'],
subjoin=True,
plotpath='analysis/flats/mplflat_', # proj still has 'mpl' prefix
standalone=False, filename='tblssets_ex2')
def make_dhdft_Table_III(self):
"""Generate the in-manuscript unCP slat table for DHDFT.
"""
self.table_wrapper(mtd=['B97D3', 'PBED3', 'M11L', 'DLDFD', 'B3LYPD3',
'PBE0D3', 'WB97XD', 'M052X', 'M062X', 'M08HX',
'M08SO', 'M11', 'VV10', 'LCVV10', 'WB97XV',
'PBE02', 'WB97X2', 'DSDPBEP86D2OPT', 'B2PLYPD3', 'MP2'],
bas=['adz', 'atz'],
tableplan=self.table_merge_abbr,
opt=['unCP'], err=['mae'],
subjoin=True,
plotpath='analysis/flats/mplflat_', # proj still has 'mpl' prefix
standalone=False, filename='tblssets_ex3')
def make_dhdft_Tables_SII(self):
"""Generate the subset details suppmat Part II tables and their indices for DHDFT.
"""
self.table_wrapper(mtd=['B97D3', 'PBED3', 'M11L', 'DLDFD', 'B3LYPD3',
'PBE0D3', 'WB97XD', 'M052X', 'M062X', 'M08HX',
'M08SO', 'M11', 'VV10', 'LCVV10', 'WB97XV',
'PBE02', 'WB97X2', 'DSDPBEP86D2OPT', 'B2PLYPD3'], # 'MP2']
bas=['adz', 'atz'],
tableplan=self.table_merge_suppmat,
opt=['CP', 'unCP'], err=['mae', 'mape'],
subjoin=False,
plotpath='analysis/flats/mplflat_', # proj still has 'mpl' prefix
standalone=False, filename='tblssets')
def make_dhdft_Tables_SIII(self):
"""Generate the per-reaction suppmat Part III tables and their indices for DHDFT.
"""
self.table_reactions(
['B97D3-unCP-adz', 'B97D3-CP-adz', 'B97D3-unCP-atz', 'B97D3-CP-atz',
'PBED3-unCP-adz', 'PBED3-CP-adz', 'PBED3-unCP-atz', 'PBED3-CP-atz',
'M11L-unCP-adz', 'M11L-CP-adz', 'M11L-unCP-atz', 'M11L-CP-atz',
'DLDFD-unCP-adz', 'DLDFD-CP-adz', 'DLDFD-unCP-atz', 'DLDFD-CP-atz',
'B3LYPD3-unCP-adz', 'B3LYPD3-CP-adz', 'B3LYPD3-unCP-atz', 'B3LYPD3-CP-atz',
'PBE0D3-unCP-adz', 'PBE0D3-CP-adz', 'PBE0D3-unCP-atz', 'PBE0D3-CP-atz',
'WB97XD-unCP-adz', 'WB97XD-CP-adz', 'WB97XD-unCP-atz', 'WB97XD-CP-atz',
'M052X-unCP-adz', 'M052X-CP-adz', 'M052X-unCP-atz', 'M052X-CP-atz',
'M062X-unCP-adz', 'M062X-CP-adz', 'M062X-unCP-atz', 'M062X-CP-atz',
'M08HX-unCP-adz', 'M08HX-CP-adz', 'M08HX-unCP-atz', 'M08HX-CP-atz',
'M08SO-unCP-adz', 'M08SO-CP-adz', 'M08SO-unCP-atz', 'M08SO-CP-atz',
'M11-unCP-adz', 'M11-CP-adz', 'M11-unCP-atz', 'M11-CP-atz',
'VV10-unCP-adz', 'VV10-CP-adz', 'VV10-unCP-atz', 'VV10-CP-atz',
'LCVV10-unCP-adz', 'LCVV10-CP-adz', 'LCVV10-unCP-atz', 'LCVV10-CP-atz',
'WB97XV-unCP-adz', 'WB97XV-CP-adz', 'WB97XV-unCP-atz', 'WB97XV-CP-atz',
'PBE02-unCP-adz', 'PBE02-CP-adz', 'PBE02-unCP-atz', 'PBE02-CP-atz',
'WB97X2-unCP-adz', 'WB97X2-CP-adz', 'WB97X2-unCP-atz', 'WB97X2-CP-atz',
'DSDPBEP86D2OPT-unCP-adz', 'DSDPBEP86D2OPT-CP-adz', 'DSDPBEP86D2OPT-unCP-atz', 'DSDPBEP86D2OPT-CP-atz',
'B2PLYPD3-unCP-adz', 'B2PLYPD3-CP-adz', 'B2PLYPD3-unCP-atz', 'B2PLYPD3-CP-atz'],
# 'MP2-unCP-adz', 'MP2-CP-adz', 'MP2-unCP-atz', 'MP2-CP-atz'],
standalone=False, filename='tblrxn_all')
class ThreeDatabases(Database):
"""
"""
def __init__(self, pythonpath=None):
"""Initialize ThreeDatabases object from Database"""
        Database.__init__(self, ['s22', 'a24', 'hsg'], dbse='DB3', pythonpath=pythonpath)
# load up data and definitions
self.load_qcdata_byproject('pt2')
self.load_qcdata_byproject('dilabio')
self.load_qcdata_byproject('f12dilabio')
self.load_subsets()
self.define_supersubsets()
self.define_supermodelchems()
def define_supersubsets(self):
"""
"""
self.sset['tt'] = ['default', 'default', 'default']
self.sset['hb'] = ['hb', 'hb', 'hb']
self.sset['mx'] = ['mx', 'mx', 'mx']
self.sset['dd'] = ['dd', 'dd', 'dd']
self.sset['mxdd'] = ['mxdd', 'mxdd', 'mxdd']
self.sset['pp'] = ['mxddpp', 'mxddpp', 'mxddpp']
self.sset['np'] = ['mxddnp', 'mxddnp', 'mxddnp']
self.sset['tt-5min'] = ['default', 'default', 'default']
self.sset['hb-5min'] = ['hb', 'hb', 'hb']
self.sset['mx-5min'] = ['mx', 'mx', 'mx']
self.sset['dd-5min'] = ['dd', 'dd', 'dd']
self.sset['mxdd-5min'] = ['mxdd', 'mxdd', 'mxdd']
self.sset['pp-5min'] = ['mxddpp', 'mxddpp', 'mxddpp']
self.sset['np-5min'] = ['mxddnp', 'mxddnp', 'mxddnp']
self.sset['weak'] = ['weak', 'weak', 'weak']
self.sset['weak_hb'] = ['weak_hb', None, 'weak_hb']
self.sset['weak_mx'] = ['weak_mx', 'weak_mx', 'weak_mx']
self.sset['weak_dd'] = ['weak_dd', 'weak_dd', 'weak_dd']
def define_supermodelchems(self):
"""
"""
self.mc['CCSD-CP-adz'] = ['CCSD-CP-adz', 'CCSD-CP-hadz', 'CCSD-CP-adz']
self.mc['CCSD-CP-atz'] = ['CCSD-CP-atz', 'CCSD-CP-hatz', 'CCSD-CP-atz']
self.mc['CCSD-CP-adtz'] = ['CCSD-CP-adtz', 'CCSD-CP-hadtz', 'CCSD-CP-adtz']
self.mc['CCSD-CP-adtzadz'] = ['CCSD-CP-adtzadz', 'CCSD-CP-adtzhadz', 'CCSD-CP-adtzadz']
self.mc['CCSD-CP-atzadz'] = ['CCSD-CP-atzadz', 'CCSD-CP-atzhadz', 'CCSD-CP-atzadz']
self.mc['CCSD-CP-atqzadz'] = ['CCSD-CP-atqzadz', 'CCSD-CP-atqzhadz', 'CCSD-CP-atqzadz']
self.mc['CCSD-CP-atzadtz'] = ['CCSD-CP-atzadtz', 'CCSD-CP-atzhadtz', 'CCSD-CP-atzadtz']
self.mc['CCSD-CP-atqzadtz'] = ['CCSD-CP-atqzadtz', 'CCSD-CP-atqzhadtz', 'CCSD-CP-atqzadtz']
self.mc['CCSD-CP-atqzatz'] = ['CCSD-CP-atqzatz', 'CCSD-CP-atqzhatz', 'CCSD-CP-atqzatz']
self.mc['CCSDT-CP-adz'] = ['CCSDT-CP-adz', 'CCSDT-CP-hadz', 'CCSDT-CP-adz']
self.mc['CCSDT-CP-atz'] = ['CCSDT-CP-atz', 'CCSDT-CP-hatz', 'CCSDT-CP-atz']
self.mc['CCSDT-CP-adtz'] = ['CCSDT-CP-adtz', 'CCSDT-CP-hadtz', 'CCSDT-CP-adtz']
self.mc['CCSDT-CP-adtzadz'] = ['CCSDT-CP-adtzadz', 'CCSDT-CP-adtzhadz', 'CCSDT-CP-adtzadz']
self.mc['CCSDT-CP-atzadz'] = ['CCSDT-CP-atzadz', 'CCSDT-CP-atzhadz', 'CCSDT-CP-atzadz']
self.mc['CCSDT-CP-atqzadz'] = ['CCSDT-CP-atqzadz', 'CCSDT-CP-atqzhadz', 'CCSDT-CP-atqzadz']
self.mc['CCSDT-CP-atzadtz'] = ['CCSDT-CP-atzadtz', 'CCSDT-CP-atzhadtz', 'CCSDT-CP-atzadtz']
self.mc['CCSDT-CP-atqzadtz'] = ['CCSDT-CP-atqzadtz', 'CCSDT-CP-atqzhadtz', 'CCSDT-CP-atqzadtz']
self.mc['CCSDT-CP-atqzatz'] = ['CCSDT-CP-atqzatz', 'CCSDT-CP-atqzhatz', 'CCSDT-CP-atqzatz']
# print certain statistic for all 4 db and summary and indiv sys if min or max
fnreservoir = {}
fnreservoir['blankslat'] = r"""Errors with respect to Benchmark. Guide lines are at 0, 0.3, and 1.0 kcal/mol overbound ($-$) and underbound ($+$)."""
fnreservoir['5min'] = r"""Only equilibrium and near-equilibrium systems included. (All S22 and HSG, 50/194 NBC10, 28/118 HBC6.)"""
fnreservoir['liliowa'] = r"""{0}MAE (dark by {1} kcal/mol) for subsets in residue classes cation, anion, polar, aliphatic, \& aromatic (L to R)."""
fnreservoir['flat'] = r"""{0}Errors with respect to benchmark within $\pm${1} kcal/mol. Guide lines are at {2} overbound ($-$) and underbound ($+$)."""
| gpl-2.0 |
Quantipy/quantipy | quantipy/core/builds/powerpoint/transformations.py | 1 | 21946 | # encoding: utf-8
'''
@author: Majeed.sahebzadha
'''
from __future__ import unicode_literals
import numpy as np
import pandas as pd
from math import ceil
import re
import operator
from quantipy.core.helpers import functions as helpers
''' Simplified access to, and manipulation of, the pandas dataframe.
Contains various helper functions.
'''
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def clean_df_values(old_df, replace_this, replace_with_that, regex_bol, as_type):
    '''Replace values in a dataframe (optionally as regex) and cast the result to the given dtype.'''
new_df = old_df.replace(replace_this,
replace_with_that,
regex=regex_bol).astype(as_type)
return new_df
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def case_insensitive_matcher(check_these, against_this):
'''
    Performs a case-insensitive search of the given list of items against the
    df's row or column labels and returns the matching labels from the df.
'''
matched = [v
for x,d in enumerate(check_these)
for i,v in enumerate(against_this)
if v.lower() == d.lower()
]
return matched
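# Illustrative sketch (hypothetical labels, not from this module):
# case_insensitive_matcher(['total', 'BASE'], df.columns) returns the labels in
# df.columns whose lowercased text equals 'total' or 'base', preserving the
# capitalisation used in the dataframe.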
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def remove_percentage_sign(old_df):
    '''Strip '%' signs from all values and cast the result to float64.'''
new_df = old_df.replace('%','',regex=True).astype(np.float64)
return new_df
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def drop_null_rows(old_df, axis_type=1):
'''
drop rows with all columns having value 0
'''
    new_df = old_df.loc[(old_df != 0).any(axis=axis_type)]
return new_df
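# Hedged usage sketch: given a dataframe with one row of all zeros,
# drop_null_rows(df) keeps only the rows in which at least one column is
# non-zero, so the all-zero row is dropped.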
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def auto_sort(df, fixed_categories=[], column_position=0, ascend=True):
'''
    Sorts a flattened (non-multiindexed) pandas dataframe whilst excluding given rows
Params:
-------
df: pandas dataframe
fixed_categories: list
list of row labels
example: [u'Other', u"Don't know/ can't recall", u'Not applicable']
column_position: int
ascend: boolean
Sort ascending vs. descending
'''
# ensure df is not empty
if not df.empty:
# ensure df is not multiindexed
nblevels = df.index.nlevels
if nblevels == 2:
raise Exception('Expected Flat DF, got multiindex DF')
# ensure fixed_categories is not empty
if fixed_categories:
#reindex df because it might contain duplicates
df = df.reset_index()
#df with no fixed categories, then sort.
df_without_fc = df.loc[~df[df.columns[0]].isin(fixed_categories)]
if pd.__version__ == '0.19.2':
df_without_fc = df_without_fc.sort_values(by=df.columns[column_position+1], ascending=ascend)
else:
df_without_fc = df_without_fc.sort(columns=df.columns[column_position+1], ascending=ascend)
#put each row as a tuple in a list
tups = []
for x in df_without_fc.itertuples():
tups.append(x)
#get fixed categories as a df
df_fc = df[~df.index.isin(df_without_fc.index)]
#convert fixed categories to rows of tuples,
#then insert row to tups list in a specific index
for x in df_fc.itertuples():
tups.insert(x[0], x)
#remove the indexes from the list of tuples
filtered_tups = [x[1:] for x in tups]
#put all the items in the tups list together to build a df
new_df = pd.DataFrame(filtered_tups, columns=list(df.columns.values))
new_df = new_df.set_index(df.columns[0])
else:
if pd.__version__ == '0.19.2':
new_df = df.sort_values(by=df.columns[column_position], ascending=ascend)
else:
new_df = df.sort(columns=df.columns[column_position], ascending=ascend)
return new_df
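# Hypothetical usage sketch (the frequency table is an assumption; the fixed
# category labels follow the docstring above):
#
#     sorted_df = auto_sort(freq_df,
#                           fixed_categories=[u'Other', u"Don't know/ can't recall"],
#                           column_position=0,
#                           ascend=False)
#
# rows are sorted descending by the first value column while the fixed
# categories keep their original positions.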
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def all_same(numpy_list):
    '''Return True if every element of the given numpy array equals the first element.'''
val = numpy_list.tolist()
return all(x == val[0] for x in val)
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def find_dups(df, orientation='Side'):
'''
Looks for duplicate labels in a df. Converts axis
labels to a list and then returns duplicate index from list.
    If the list contains duplicates then a statement is returned.
'''
if orientation == 'Side':
mylist = list(df.index.values)
axis = 'row'
else:
mylist = list(df.columns.values)
axis = 'column'
dup_idx = [i for i, x in enumerate(mylist) if mylist.count(x) > 1]
if dup_idx:
statement = ("\n{indent:>10}*Warning: This table/chart contains duplicate "
"{orientation} labels".format(
indent='',
orientation=axis))
else:
statement = ''
return statement
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def df_splitter(df, min_rows, max_rows):
'''
returns a list of dataframes sliced as evenly as possible
'''
    #ensure the index labels are strings, not ints or floats
if not isinstance(df.index, unicode):
df.index = df.index.map(unicode)
row_count = len(df.index)
maxs = pd.Series(range(min_rows, max_rows+1))
rows = pd.Series([row_count]*maxs.size)
mods = rows % maxs
splitter = maxs[mods >= min_rows].max()
if row_count <= max_rows:
splitter = 1
else:
splitter = ceil(row_count/float(splitter))
size = int(ceil(float(len(df)) / splitter))
return [df[i:i + size] for i in range(0, len(df), size)]
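# Illustrative sketch: for a 25-row dataframe, df_splitter(df, min_rows=5,
# max_rows=10) returns three slices of 9, 9 and 7 rows, split as evenly as
# possible while keeping each slice within the requested bounds.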
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def strip_html_tags(text):
'''
Strip HTML tags from any string and transform special entities
'''
rules = [
{r'<[^<]+?>': u''}, # remove remaining tags
{r'^\s+' : u'' }, # remove spaces at the beginning
{r'\,([a-zA-Z])': r', \1'}, # add space after a comma
{r'\s+' : u' '} # replace consecutive spaces
]
for rule in rules:
for (k,v) in rule.items():
regex = re.compile(k)
text = regex.sub(v, text)
# replace special strings
special = {
        '&nbsp;': ' ',
        '&amp;': '&',
        '&quot;': '"',
        '&lt;': '<',
        '&gt;': '>',
'**': '',
"’": "'"
}
for (k,v) in special.items():
text = text.replace(k, v)
return text
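# Illustrative sketch:
#     strip_html_tags('<b>Top&nbsp;2 box,net</b>')  ->  'Top 2 box, net'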
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def clean_axes_labels(df):
'''
Cleans dataframe labels. Strips html code, double white spaces and so on.
Params:
-------
df: pandas dataframe
'''
#standardise all index/column elements as unicode
df_index_labels = df.index.map(unicode)
df_col_labels = df.columns.map(unicode)
# df_index_labels = [unicode(w)
# if not isinstance(w, unicode) and not isinstance(w, str)
# else w
# for w in df.index.values]
# df_col_labels = df.columns.values
col_labels = []
index_labels = []
for ctext in df_col_labels:
ctext = strip_html_tags(ctext)
col_labels.append(ctext)
for indtext in df_index_labels:
indtext = strip_html_tags(indtext)
index_labels.append(indtext)
df.columns = col_labels
df.index = index_labels
return df
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def color_setter(numofseries, color_order='reverse'):
    '''Return one RGB tuple per series from a fixed palette, reversed by default.'''
color_set = [(147,208,35), (83,172,175), (211,151,91), (17,124,198), (222,231,5), (136,87,136),
(88,125,21), (49,104,106), (143,91,38), (10,74,119), (133,139,3), (82,52,82),
(171,224,72), (117,189,191), (220,172,124), (38,155,236), (242,250,40), (165,115,165),
(118,166,28), (66,138,141), (190,121,51), (14,99,158), (178,185,4), (109,70,109),
(192,232,118), (152,205,207), (229,193,157), (92, 180,241), (245,252,94), (188,150,188),
(74,104,18), (41,87,88), (119,75,32), (9,62,99), (111,116,3), (68,44,68),
(197,227,168), (176,209,210), (229,199,178), (166,188,222), (235,240,166), (193,177,193),
(202,229,175), (182,212,213), (231,203,184), (174,194,224), (237,241,174), (198,183,198)]
color_set = color_set[0:numofseries]
if color_order == 'reverse':
return color_set[::-1]
else:
return color_set
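# Illustrative sketch: color_setter(3) returns the first three palette entries
# in reversed order, i.e. [(211, 151, 91), (83, 172, 175), (147, 208, 35)];
# passing anything other than 'reverse' for color_order keeps the original order.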
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def place_vals_in_labels(old_df, base_position=0, orientation='side', drop_position=True):
'''
Takes values from a given column or row and inserts it to the df's row or column labels.
Normally used to insert base values in row or column labels.
'''
if orientation == 'side':
#grab desired column's values, normally index 0
col_vals = old_df.ix[:,[base_position]].values
#col_vals returns a list of list which needs flattening
flatten_col_vals = [item for sublist in col_vals for item in sublist]
#grab row labels
index_labels = old_df.index
new_labels_list = {}
for x,y in zip(index_labels, flatten_col_vals):
new_labels_list.update({x : x + " (n=" + str(y) +")"})
new_df = old_df.rename(index=new_labels_list, inplace=False)
if drop_position:
new_df = new_df.drop(new_df.columns[[base_position]], axis=1, inplace=False)
else:
#grab desired row's values, normally index 0
row_vals = old_df.ix[[base_position],:].values
#row_vals returns a list of list which needs flattening
flatten_col_vals = [item for sublist in row_vals for item in sublist]
        #grab column labels
        col_labels = old_df.columns
        #rename columns one by one.
        new_labels_list = {}
        for x,y in zip(col_labels, flatten_col_vals):
new_labels_list.update({x : x + " (n=" + str(y) +")"})
new_df = old_df.rename(columns=new_labels_list, inplace=False)
if drop_position:
new_df = new_df.drop(new_df.index[[base_position]], axis=0, inplace=False)
return new_df
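# Hedged usage sketch (row labels and base sizes are assumptions): with a base
# column at position 0 holding 100 and 50, place_vals_in_labels(df) renames the
# rows 'Male' and 'Female' to 'Male (n=100)' and 'Female (n=50)' and, by
# default, drops the base column.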
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def get_qestion_labels(cluster_name, meta, table_name=None):
    '''Collect the question label (text for the default text key) of each table in the cluster.'''
question_label_dict ={}
text_key = meta['lib']['default text']
table_list = cluster_name.keys()
for table in table_list:
view = cluster_name[table][cluster_name[table].data_key][cluster_name[table].filter][table][cluster_name[table].content_of_axis[0]][cluster_name[table].views[1]]
vdf = view.dataframe
# vdf = drop_hidden_codes(vdf, meta)
# df = index_from_meta(vdf, meta, vdf)
# question_label = df.index.get_level_values(level=0)[0]
# question_label_dict[table] = question_label
qname = vdf.index.get_level_values(0).tolist()[0]
vdf_meta = meta['columns'].get(qname, '%s not in the columns set in the meta' % (qname))
question_label_dict[table] = vdf_meta['text'][text_key]
# question_label_dict[table] = vdf_meta['text'][text_key]
if table_name:
return question_label_dict[table_name]
else:
return question_label_dict
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def validate_cluster_orientations(cluster):
'''
Make sure that the chains follow the rule:
- All chains must have the same orientation, x or y.
'''
if len(set([
cluster[chain_name].orientation
for chain_name in cluster.keys()
])) != 1:
raise Exception(
"Chain orientations must be consistent. Please review chain "
"specification"
)
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def get_base(df, base_description, is_mask):
'''
Constructs base text for any kind of chart, single, multiple, grid.
Params:
-------
df: pandas dataframe
base_description: str
'''
num_to_str = lambda string: str(int(round(string)))
base_text_format = lambda txt, num: '{} ({})'.format(txt, num_to_str(num))
#standardise all index/column elements as unicode
df_index_labels = df.index.map(unicode)
df_col_labels = df.columns.map(unicode)
# get col labels and row values
top_members = df.columns.values
base_values = df.values
# count row/col
numofcols = len(df.columns)
numofrows = len(df.index)
    #if a base description is provided then
if base_description:
#example of what the base description would look like - 'Base: Har minst ett plagg'
#remove the word "Base:" from the description
description = base_description.split(': ')[-1]
#grab the label for base from the df
base_label = df.index[0]
#put them together
base_description = '{}: {}'.format(base_label, description)
else:
base_description = df.index.values[0]
base_description = base_description.strip()
#single series format
if numofcols == 1:
base_text = base_text_format(base_description, base_values[0][0])
#multi series format
elif numofcols > 1:
# if all_same(base_values[0]):
# base_text = base_text_format(base_description, base_values[0][0])
# else:
if not is_mask:
it = zip(top_members, base_values[0])
base_texts = ', '.join([base_text_format(x, y) for x, y in it])
base_text = ' - '.join([base_description, base_texts])
else:
base_text = base_text_format(base_description, base_values[0][0])
return base_text
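# Illustrative sketch (values are assumptions): for a single-column df whose
# first row holds the base of 250, whose first index label is 'Base' and whose
# base_description is 'Base: All respondents', get_base returns
# 'Base: All respondents (250)'.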
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def replace_decimal_point_with(df, replacer=","):
    '''Stringify the dataframe's values, replacing the decimal point with *replacer* (default ",").'''
for col in df.columns:
        df[col] = pd.Series(["{0}".format(val).replace('.', replacer) for val in df[col]], index = df.index)
return df
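# Illustrative sketch: with the default replacer, a cell holding 12.5 is
# rendered as the string '12,5'.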
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def set_column_sequence(dataframe, seq):
'''
Takes a dataframe and a subsequence of its columns, returns dataframe with seq as first columns
'''
cols = seq[:] # copy so we don't mutate seq
for x in dataframe.columns:
if x not in cols:
cols.append(x)
return dataframe[cols]
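# Illustrative sketch: set_column_sequence(df, ['Total']) moves a hypothetical
# 'Total' column to the front and leaves the remaining columns in their
# existing order.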
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def round_df_cells(df, decimal_points):
    '''Round every cell to the given number of decimal places (cast to int when decimal_points is 0).'''
if decimal_points == 0:
df = df.applymap(lambda x: int(round(x)))
else:
df = df.applymap(lambda x: round(x, decimal_points))
return df
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def reverse_order(old_df, orientation='side'):
'''
Will reverse the order of rows or columns
'''
if orientation == 'side':
df = old_df.iloc[::-1]
else:
df = old_df[old_df.columns[::-1]]
return df
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def get_selection_by_index(df, position, orientation='side'):
'''
Grabs and returns a single column or row.
example: myrow = get_selection_by_index(mydf, 2, 'side')
grabs row 2 from df.
'''
if orientation == 'side':
#will return a single row
df = df.ix[[position],:]
else:
#will return a single col
df = df.ix[:,[position]]
return df
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def del_by_label(df, label_to_del, orientation='side'):
'''
deletes a single or multiple row or col labels from df
param - label_to_del: takes a list of labels
'''
#if what's passed into label_to_del is not in a list then
#put it in a list
if not isinstance(label_to_del, list):
label_to_del = [label_to_del]
if orientation=='side':
orientation=0
else:
orientation=1
    df.drop(label_to_del, axis=orientation, inplace=True)
return df
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def rename_label(df, old_label, new_label, orientation='side'):
'''
Renames a single row or cols label
'''
if orientation == 'side':
df.rename(index={old_label: new_label}, inplace=True)
else:
df.rename(columns={old_label: new_label}, inplace=True)
return df
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def drop_hidden_codes(view):
    '''Drop rows flagged as hidden codes ('x_hidden_codes' in the view meta) from the view dataframe.'''
#drop hidden codes
if 'x_hidden_codes' in view.meta():
vdf = helpers.deep_drop(
view.dataframe,
view.meta()['x_hidden_codes'],
axes=0)
else:
vdf = view.dataframe
return vdf
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def paint_df(vdf, view, meta, text_key):
    '''Paint question and value labels onto the view dataframe using the project meta.'''
#add question and value labels to df
if 'x_new_order' in view.meta():
df = helpers.paint_dataframe(
df=vdf.copy(),
meta=meta,
ridx=view.meta()['x_new_order'],
text_key=text_key)
else:
df = helpers.paint_dataframe(
df=vdf.copy(),
meta=meta,
text_key=text_key)
return df
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def partition_view_df(view, values=False, data_only=False, axes_only=False):
'''
Disassembles a view dataframe object into its
inner-most index/columns parts (by dropping the first level)
and the actual data.
Parameters
----------
view : Quantipy view
values : boolean, optional
If True will return the np.array
containing the df values instead of a dataframe
data_only : boolean, optional
If True will only return the data component of the view dataframe
axes_only : boolean, optional
If True will only return the inner-most index and columns component
of the view dataframe.
Returns
-------
data, index, columns : dataframe (or np.array of values), index, columns
'''
df = view.copy()
if isinstance(df.index, pd.MultiIndex):
df.index = df.index.droplevel()
if isinstance(df.columns, pd.MultiIndex):
df.columns = df.columns.droplevel()
index = df.index
columns = df.columns
data = df if not values else df.values
if data_only:
return data
elif axes_only:
return index.tolist(), columns.tolist()
else:
return data, index.tolist(), columns.tolist()
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
def is_grid_element(table_name, table_pattern):
'''
Checks if a table is a grid element or not
Parameters
----------
'''
matches = table_pattern.findall(table_name)
if (len(matches)>0 and len(matches[0])==2):
matched = True
else:
matched = False
return matched
| mit |
MicrosoftGenomics/FaST-LMM | fastlmm/association/snp_set.py | 1 | 6092 | from fastlmm.util.runner import *
import logging
import fastlmm.pyplink.plink as plink
import fastlmm.util.util as flutil
import numpy as np
from fastlmm.inference.lmm_cov import LMM as fastLMM
import scipy.stats as stats
from fastlmm.util.pickle_io import load, save
import time
import pandas as pd
def snp_set(
test_snps,
set_list,
pheno,
covar = None,
output_file_name = None,
G0 = None,
test="lrt",
write_lrtperm = False,
nperm = 10,
npermabs = None,
mpheno=1,
G0_fit="qq",
qmax=0.1,
seed = None,
minsetsize = None,
maxsetsize = None,
mindist=0,
idist=1,
show_pvalue_5050 = False
):
"""
Function performing GWAS on sets of snps
:param test_snps: The base name of the file containing the SNPs for alternative kernel. The file must be in PLINK Bed format.
:type test_snps: a string
:param set_list: The name of a tab-delimited file defining the sets. The file should contain two-columns 'snp' and 'set'.
:type set_list: a string
:param pheno: The name of a file containing the phenotype. The file must be in PLINK phenotype format.
:type pheno: a string
:param covar: covariate information, optional: The name of a file in PLINK phenotype format.
:type covar: a 'pheno dictionary' or a string
:param output_file_name: Name of file to write results to, optional. If not given, no output file will be created.
:type output_file_name: file name
:param G0: Training SNPs from which to construct a similarity kernel. It should be the base name of files in PLINK Bed or Ped format.
:type G0: a string
:param test: 'lrt' (default) or 'sc_davies'
:type test: a string
:param write_lrtperm: (default: False) If True, write the lrtperm vector (dictated by seed) to a file.
:type write_lrtperm: boolean
:param nperm: (default: 10) number of permutations per test
:type nperm: number
:param npermabs: (default: None) absolute number of permutations
:type npermabs: number
:param mpheno: (default: 1) integer, starting at 1, representing the index of the phenotype tested
:type mpheno: number
:param G0_fit: (default: "qq") How to fit G0. Should be either "qq" or "ml"
:type G0_fit: string
:param qmax: (default: .1) Use the top qmax fraction of G0 distrib test statistics to fit the G0 distribution
:type qmax: number
:param seed: (optional) Random seed used to generate permutations for lrt G0 fitting.
:type seed: number
:param minsetsize: (optional) only include sets at least this large (inclusive)
:type minsetsize: number
:param maxsetsize: (optional) only include sets no more than this large (inclusive)
:type maxsetsize: number
:param mindist: (default 0) SNPs within mindist from the test SNPs will be removed from
:type mindist: number
:param idist: (default: 1) the type of position to use with mindist
1, genomic distance
2, base-pair distance
:type idist: number
:param show_pvalue_5050: (default: False) show a conservative P-value arising from an assumed null distribution that is a 50-50 mixture distribution of 0 and 1 dof chi squares [Molenberghs and Verbeke, 2003].
Provided for backwards compatibility.
:type show_pvalue_5050: Boolean
:rtype: Pandas dataframe with one row per set.
:Example:
>>> import logging
>>> from fastlmm.association import snp_set
>>> logging.basicConfig(level=logging.INFO)
>>> result_dataframe = snp_set(
... test_snps = '../../tests/datasets/all_chr.maf0.001.N300',
... set_list = '../../tests/datasets/set_input.23.txt',
... pheno = '../../tests/datasets/phenSynthFrom22.23.N300.txt')
>>> print result_dataframe.iloc[0].SetId, round(result_dataframe.iloc[0]['P-value'],15)
set23 0.0
"""
assert test=="lrt" or test=="sc_davies", "Expect test to be 'lrt' or 'sc_davies'"
if G0 is None:
nullModel={'effect':'fixed', 'link':'linear'}
altModel={'effect':'mixed', 'link':'linear'}
else:
nullModel={'effect':'mixed', 'link':'linear'}
altModel={'effect':'mixed', 'link':'linear'}
if test=="lrt":
test="lrt_up"
if output_file_name is None:
import tempfile
fileno, output_file_name = tempfile.mkstemp()
fptr= os.fdopen(fileno)
is_temp = True
else:
is_temp = False
from fastlmm.association.FastLmmSet import FastLmmSet
fast_lmm_set = FastLmmSet(
outfile=output_file_name,
phenofile=pheno,
alt_snpreader=test_snps,
altset_list=set_list,
covarfile=covar,
filenull=G0,
nperm=nperm,
mindist=mindist,
idist=idist,
mpheno=mpheno,
nullfit = G0_fit,
qmax=qmax,
test=test,
autoselect=False,
nullModel=nullModel,
altModel=altModel,
npermabs = npermabs,
calseed = seed,
minsetsize = minsetsize,
maxsetsize = maxsetsize,
write_lrtperm = write_lrtperm,
show_pvalue_5050 = show_pvalue_5050,
)
result = Local().run(fast_lmm_set)
dataframe=pd.read_csv(output_file_name,delimiter='\t',comment=None) #Need \t instead of \s because the output has tabs by design and spaces in column names(?)
if is_temp:
fptr.close()
os.remove(output_file_name)
return dataframe
if __name__ == "__main__":
#import logging
#from fastlmm.association import snp_set
#logging.basicConfig(level=logging.INFO)
#result_dataframe = snp_set(
# test_snps = '../../tests/datasets/all_chr.maf0.001.N300',
# set_list = '../../tests/datasets/set_input.23.txt',
# pheno = '../../tests/datasets/phenSynthFrom22.23.N300.txt')
#print result_dataframe.iloc[0].SetId, round(result_dataframe.iloc[0]['P-value_lrt'],15)
##set23 0.0
import doctest
doctest.testmod()
print "done"
| apache-2.0 |
Ziqi-Li/bknqgis | pandas/pandas/tests/sparse/test_arithmetics.py | 18 | 19342 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestSparseArrayArithmetics(object):
_base = np.array
_klass = pd.SparseArray
def _assert(self, a, b):
tm.assert_numpy_array_equal(a, b)
def _check_numeric_ops(self, a, b, a_dense, b_dense):
with np.errstate(invalid='ignore', divide='ignore'):
# Unfortunately, trying to wrap the computation of each expected
            # value with np.errstate() is too tedious.
# sparse & sparse
self._assert((a + b).to_dense(), a_dense + b_dense)
self._assert((b + a).to_dense(), b_dense + a_dense)
self._assert((a - b).to_dense(), a_dense - b_dense)
self._assert((b - a).to_dense(), b_dense - a_dense)
self._assert((a * b).to_dense(), a_dense * b_dense)
self._assert((b * a).to_dense(), b_dense * a_dense)
# pandas uses future division
self._assert((a / b).to_dense(), a_dense * 1.0 / b_dense)
self._assert((b / a).to_dense(), b_dense * 1.0 / a_dense)
# ToDo: FIXME in GH 13843
if not (self._base == pd.Series and a.dtype == 'int64'):
self._assert((a // b).to_dense(), a_dense // b_dense)
self._assert((b // a).to_dense(), b_dense // a_dense)
self._assert((a % b).to_dense(), a_dense % b_dense)
self._assert((b % a).to_dense(), b_dense % a_dense)
self._assert((a ** b).to_dense(), a_dense ** b_dense)
self._assert((b ** a).to_dense(), b_dense ** a_dense)
# sparse & dense
self._assert((a + b_dense).to_dense(), a_dense + b_dense)
self._assert((b_dense + a).to_dense(), b_dense + a_dense)
self._assert((a - b_dense).to_dense(), a_dense - b_dense)
self._assert((b_dense - a).to_dense(), b_dense - a_dense)
self._assert((a * b_dense).to_dense(), a_dense * b_dense)
self._assert((b_dense * a).to_dense(), b_dense * a_dense)
# pandas uses future division
self._assert((a / b_dense).to_dense(), a_dense * 1.0 / b_dense)
self._assert((b_dense / a).to_dense(), b_dense * 1.0 / a_dense)
# ToDo: FIXME in GH 13843
if not (self._base == pd.Series and a.dtype == 'int64'):
self._assert((a // b_dense).to_dense(), a_dense // b_dense)
self._assert((b_dense // a).to_dense(), b_dense // a_dense)
self._assert((a % b_dense).to_dense(), a_dense % b_dense)
self._assert((b_dense % a).to_dense(), b_dense % a_dense)
self._assert((a ** b_dense).to_dense(), a_dense ** b_dense)
self._assert((b_dense ** a).to_dense(), b_dense ** a_dense)
def _check_bool_result(self, res):
assert isinstance(res, self._klass)
assert res.dtype == np.bool
assert isinstance(res.fill_value, bool)
def _check_comparison_ops(self, a, b, a_dense, b_dense):
with np.errstate(invalid='ignore'):
# Unfortunately, trying to wrap the computation of each expected
            # value with np.errstate() is too tedious.
#
# sparse & sparse
self._check_bool_result(a == b)
self._assert((a == b).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b)
self._assert((a != b).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b)
self._assert((a >= b).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b)
self._assert((a <= b).to_dense(), a_dense <= b_dense)
self._check_bool_result(a > b)
self._assert((a > b).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b)
self._assert((a < b).to_dense(), a_dense < b_dense)
# sparse & dense
self._check_bool_result(a == b_dense)
self._assert((a == b_dense).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b_dense)
self._assert((a != b_dense).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b_dense)
self._assert((a >= b_dense).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b_dense)
self._assert((a <= b_dense).to_dense(), a_dense <= b_dense)
self._check_bool_result(a > b_dense)
self._assert((a > b_dense).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b_dense)
self._assert((a < b_dense).to_dense(), a_dense < b_dense)
def _check_logical_ops(self, a, b, a_dense, b_dense):
# sparse & sparse
self._check_bool_result(a & b)
self._assert((a & b).to_dense(), a_dense & b_dense)
self._check_bool_result(a | b)
self._assert((a | b).to_dense(), a_dense | b_dense)
# sparse & dense
self._check_bool_result(a & b_dense)
self._assert((a & b_dense).to_dense(), a_dense & b_dense)
self._check_bool_result(a | b_dense)
self._assert((a | b_dense).to_dense(), a_dense | b_dense)
def test_float_scalar(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
self._check_numeric_ops(a, 1, values, 1)
self._check_numeric_ops(a, 0, values, 0)
self._check_numeric_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=0)
self._check_numeric_ops(a, 1, values, 1)
self._check_numeric_ops(a, 0, values, 0)
self._check_numeric_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=2)
self._check_numeric_ops(a, 1, values, 1)
self._check_numeric_ops(a, 0, values, 0)
self._check_numeric_ops(a, 3, values, 3)
def test_float_scalar_comparison(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=0)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = self._klass(values, kind=kind, fill_value=2)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
def test_float_same_index(self):
# when sp_index are the same
for kind in ['integer', 'block']:
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues)
values = self._base([0., 1., 2., 6., 0., 0., 1., 2., 1., 0.])
rvalues = self._base([0., 2., 3., 4., 0., 0., 1., 3., 2., 0.])
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_numeric_ops(a, b, values, rvalues)
def test_float_same_index_comparison(self):
# when sp_index are the same
for kind in ['integer', 'block']:
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
values = self._base([0., 1., 2., 6., 0., 0., 1., 2., 1., 0.])
rvalues = self._base([0., 2., 3., 4., 0., 0., 1., 3., 2., 0.])
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
def test_float_array(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues)
self._check_numeric_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
self._check_numeric_ops(a, b, values, rvalues)
def test_float_array_different_kind(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
a = self._klass(values, kind='integer')
b = self._klass(rvalues, kind='block')
self._check_numeric_ops(a, b, values, rvalues)
self._check_numeric_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind='integer', fill_value=0)
b = self._klass(rvalues, kind='block')
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind='integer', fill_value=0)
b = self._klass(rvalues, kind='block', fill_value=0)
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind='integer', fill_value=1)
b = self._klass(rvalues, kind='block', fill_value=2)
self._check_numeric_ops(a, b, values, rvalues)
def test_float_array_comparison(self):
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
def test_int_array(self):
# have to specify dtype explicitly until fixing GH 667
dtype = np.int64
values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
for kind in ['integer', 'block']:
a = self._klass(values, dtype=dtype, kind=kind)
assert a.dtype == dtype
b = self._klass(rvalues, dtype=dtype, kind=kind)
assert b.dtype == dtype
self._check_numeric_ops(a, b, values, rvalues)
self._check_numeric_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)
assert a.dtype == dtype
b = self._klass(rvalues, dtype=dtype, kind=kind)
assert b.dtype == dtype
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, fill_value=0, dtype=dtype, kind=kind)
assert a.dtype == dtype
b = self._klass(rvalues, fill_value=0, dtype=dtype, kind=kind)
assert b.dtype == dtype
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, fill_value=1, dtype=dtype, kind=kind)
assert a.dtype == dtype
b = self._klass(rvalues, fill_value=2, dtype=dtype, kind=kind)
assert b.dtype == dtype
self._check_numeric_ops(a, b, values, rvalues)
def test_int_array_comparison(self):
# int32 NI ATM
for dtype in ['int64']:
values = self._base([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
for kind in ['integer', 'block']:
a = self._klass(values, dtype=dtype, kind=kind)
b = self._klass(rvalues, dtype=dtype, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=0)
b = self._klass(rvalues, dtype=dtype, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=0)
b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, dtype=dtype, kind=kind, fill_value=1)
b = self._klass(rvalues, dtype=dtype, kind=kind, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
def test_bool_same_index(self):
# GH 14000
# when sp_index are the same
for kind in ['integer', 'block']:
values = self._base([True, False, True, True], dtype=np.bool)
rvalues = self._base([True, False, True, True], dtype=np.bool)
for fill_value in [True, False, np.nan]:
a = self._klass(values, kind=kind, dtype=np.bool,
fill_value=fill_value)
b = self._klass(rvalues, kind=kind, dtype=np.bool,
fill_value=fill_value)
self._check_logical_ops(a, b, values, rvalues)
def test_bool_array_logical(self):
# GH 14000
# when sp_index are the same
for kind in ['integer', 'block']:
values = self._base([True, False, True, False, True, True],
dtype=np.bool)
rvalues = self._base([True, False, False, True, False, True],
dtype=np.bool)
for fill_value in [True, False, np.nan]:
a = self._klass(values, kind=kind, dtype=np.bool,
fill_value=fill_value)
b = self._klass(rvalues, kind=kind, dtype=np.bool,
fill_value=fill_value)
self._check_logical_ops(a, b, values, rvalues)
def test_mixed_array_float_int(self):
for rdtype in ['int64']:
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
assert b.dtype == rdtype
self._check_numeric_ops(a, b, values, rvalues)
self._check_numeric_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
assert b.dtype == rdtype
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
assert b.dtype == rdtype
self._check_numeric_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
assert b.dtype == rdtype
self._check_numeric_ops(a, b, values, rvalues)
def test_mixed_array_comparison(self):
# int32 NI ATM
for rdtype in ['int64']:
values = self._base([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = self._base([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
for kind in ['integer', 'block']:
a = self._klass(values, kind=kind)
b = self._klass(rvalues, kind=kind)
assert b.dtype == rdtype
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind)
assert b.dtype == rdtype
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=0)
b = self._klass(rvalues, kind=kind, fill_value=0)
assert b.dtype == rdtype
self._check_comparison_ops(a, b, values, rvalues)
a = self._klass(values, kind=kind, fill_value=1)
b = self._klass(rvalues, kind=kind, fill_value=2)
assert b.dtype == rdtype
self._check_comparison_ops(a, b, values, rvalues)
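# --- illustrative sketch (added for exposition; not part of the original test
# module). A standalone flavour of the sparse/dense equivalence these helpers
# assert, assuming a pandas version that still ships ``pd.SparseArray``:
#
#   >>> import numpy as np, pandas as pd
#   >>> sp = pd.SparseArray([np.nan, 1.0, 2.0])
#   >>> np.allclose((sp + 1).to_dense(), np.array([np.nan, 2.0, 3.0]),
#   ...             equal_nan=True)
#   True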
class TestSparseSeriesArithmetic(TestSparseArrayArithmetics):
_base = pd.Series
_klass = pd.SparseSeries
def _assert(self, a, b):
tm.assert_series_equal(a, b)
def test_alignment(self):
da = pd.Series(np.arange(4))
db = pd.Series(np.arange(4), index=[1, 2, 3, 4])
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=0)
sb = pd.SparseSeries(np.arange(4), index=[1, 2, 3, 4],
dtype=np.int64, fill_value=0)
self._check_numeric_ops(sa, sb, da, db)
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=np.nan)
sb = pd.SparseSeries(np.arange(4), index=[1, 2, 3, 4],
dtype=np.int64, fill_value=np.nan)
self._check_numeric_ops(sa, sb, da, db)
da = pd.Series(np.arange(4))
db = pd.Series(np.arange(4), index=[10, 11, 12, 13])
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=0)
sb = pd.SparseSeries(np.arange(4), index=[10, 11, 12, 13],
dtype=np.int64, fill_value=0)
self._check_numeric_ops(sa, sb, da, db)
sa = pd.SparseSeries(np.arange(4), dtype=np.int64, fill_value=np.nan)
sb = pd.SparseSeries(np.arange(4), index=[10, 11, 12, 13],
dtype=np.int64, fill_value=np.nan)
self._check_numeric_ops(sa, sb, da, db)
| gpl-2.0 |
e-sailing/openplotter | show_raw_adc_convert.py | 2 | 1439 | #!/usr/bin/env python
# This file is part of Openplotter.
# Copyright (C) 2015 by sailoog <https://github.com/sailoog/openplotter>
# e-sailing <https://github.com/e-sailing/openplotter>
# Openplotter is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
# Openplotter is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Openplotter. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import sys
from classes.conf import Conf
from classes.language import Language
edit = sys.argv[1]
conf = Conf()
Language(conf)
data = conf.get('SPI', 'value_' + str(edit))
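# Note (assumption, not taken from an actual config): the stored string is
# expected to eval() to a list of (raw_adc, converted_value) pairs, e.g.
# "[(0, 0.0), (512, 6.0), (1023, 12.0)]", which is what gets unzipped and
# plotted below.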
listsave = []
try:
temp_list = eval(data)
except:
temp_list = []
for ii in temp_list:
listsave.append(ii)
plt.plot(*zip(*listsave))
plt.suptitle(
	_('settings to convert raw adc values (nonlinear and/or no factor and/or no offset)\n to usable values for input ').decode('utf8') + str(
edit), fontsize=12)
plt.xlabel(_('raw adc value'), fontsize=12)
plt.ylabel(_('value in unit'), fontsize=12)
plt.show()
| gpl-2.0 |
nmayorov/scipy | scipy/signal/_max_len_seq.py | 11 | 4919 | # Author: Eric Larson
# 2014
"""Tools for MLS generation"""
import numpy as np
from ._max_len_seq_inner import _max_len_seq_inner
__all__ = ['max_len_seq']
# These are definitions of linear shift register taps for use in max_len_seq()
_mls_taps = {2: [1], 3: [2], 4: [3], 5: [3], 6: [5], 7: [6], 8: [7, 6, 1],
9: [5], 10: [7], 11: [9], 12: [11, 10, 4], 13: [12, 11, 8],
14: [13, 12, 2], 15: [14], 16: [15, 13, 4], 17: [14],
18: [11], 19: [18, 17, 14], 20: [17], 21: [19], 22: [21],
23: [18], 24: [23, 22, 17], 25: [22], 26: [25, 24, 20],
27: [26, 25, 22], 28: [25], 29: [27], 30: [29, 28, 7],
31: [28], 32: [31, 30, 10]}
def max_len_seq(nbits, state=None, length=None, taps=None):
"""
Maximum length sequence (MLS) generator.
Parameters
----------
nbits : int
Number of bits to use. Length of the resulting sequence will
be ``(2**nbits) - 1``. Note that generating long sequences
(e.g., greater than ``nbits == 16``) can take a long time.
state : array_like, optional
If array, must be of length ``nbits``, and will be cast to binary
(bool) representation. If None, a seed of ones will be used,
producing a repeatable representation. If ``state`` is all
zeros, an error is raised as this is invalid. Default: None.
length : int, optional
Number of samples to compute. If None, the entire length
``(2**nbits) - 1`` is computed.
taps : array_like, optional
Polynomial taps to use (e.g., ``[7, 6, 1]`` for an 8-bit sequence).
If None, taps will be automatically selected (for up to
``nbits == 32``).
Returns
-------
seq : array
Resulting MLS sequence of 0's and 1's.
state : array
The final state of the shift register.
Notes
-----
The algorithm for MLS generation is generically described in:
https://en.wikipedia.org/wiki/Maximum_length_sequence
The default values for taps are specifically taken from the first
option listed for each value of ``nbits`` in:
http://www.newwaveinstruments.com/resources/articles/m_sequence_linear_feedback_shift_register_lfsr.htm
.. versionadded:: 0.15.0
Examples
--------
MLS uses binary convention:
>>> from scipy.signal import max_len_seq
>>> max_len_seq(4)[0]
array([1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=int8)
MLS has a white spectrum (except for DC):
>>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, ifft, fftshift, fftfreq
>>> seq = max_len_seq(6)[0]*2-1 # +1 and -1
>>> spec = fft(seq)
>>> N = len(seq)
>>> plt.plot(fftshift(fftfreq(N)), fftshift(np.abs(spec)), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Circular autocorrelation of MLS is an impulse:
>>> acorrcirc = ifft(spec * np.conj(spec)).real
>>> plt.figure()
>>> plt.plot(np.arange(-N/2+1, N/2+1), fftshift(acorrcirc), '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
Linear autocorrelation of MLS is approximately an impulse:
>>> acorr = np.correlate(seq, seq, 'full')
>>> plt.figure()
>>> plt.plot(np.arange(-N+1, N), acorr, '.-')
>>> plt.margins(0.1, 0.1)
>>> plt.grid(True)
>>> plt.show()
"""
if taps is None:
if nbits not in _mls_taps:
known_taps = np.array(list(_mls_taps.keys()))
raise ValueError('nbits must be between %s and %s if taps is None'
% (known_taps.min(), known_taps.max()))
taps = np.array(_mls_taps[nbits], np.intp)
else:
taps = np.unique(np.array(taps, np.intp))[::-1]
if np.any(taps < 0) or np.any(taps > nbits) or taps.size < 1:
raise ValueError('taps must be non-empty with values between '
'zero and nbits (inclusive)')
taps = np.ascontiguousarray(taps) # needed for Cython
n_max = (2**nbits) - 1
if length is None:
length = n_max
else:
length = int(length)
if length < 0:
raise ValueError('length must be greater than or equal to 0')
# We use int8 instead of bool here because NumPy arrays of bools
# don't seem to work nicely with Cython
if state is None:
state = np.ones(nbits, dtype=np.int8, order='c')
else:
# makes a copy if need be, ensuring it's 0's and 1's
state = np.array(state, dtype=bool, order='c').astype(np.int8)
if state.ndim != 1 or state.size != nbits:
raise ValueError('state must be a 1-D array of size nbits')
if np.all(state == 0):
raise ValueError('state must not be all zeros')
seq = np.empty(length, dtype=np.int8, order='c')
state = _max_len_seq_inner(taps, state, nbits, length, seq)
return seq, state
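if __name__ == '__main__':
    # Minimal, hedged self-check (added for exposition; not part of the
    # original module): ``state`` and ``taps`` can be given explicitly instead
    # of the table-driven defaults; the values below are chosen purely for
    # illustration.
    demo_seq, demo_state = max_len_seq(4, state=[0, 0, 0, 1], taps=[3])
    assert len(demo_seq) == 2 ** 4 - 1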
| bsd-3-clause |
aajtodd/zipline | zipline/history/history.py | 20 | 12233 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import numpy as np
import pandas as pd
import re
from zipline.finance import trading
from zipline.finance.trading import with_environment
from zipline.errors import IncompatibleHistoryFrequency
def parse_freq_str(freq_str):
# TODO: Wish we were more aligned with pandas here.
num_str, unit_str = re.match('([0-9]+)([A-Za-z]+)', freq_str).groups()
return int(num_str), unit_str
class Frequency(object):
"""
Represents how the data is sampled, as specified by the algoscript
via units like "1d", "1m", etc.
Currently only two frequencies are supported, "1d" and "1m"
- "1d" provides data at daily frequency, with the latest bar aggregating
the elapsed minutes of the (incomplete) current day
- "1m" provides data at minute frequency
"""
SUPPORTED_FREQUENCIES = frozenset({'1d', '1m'})
MAX_MINUTES = {'m': 1, 'd': 390}
MAX_DAYS = {'d': 1}
def __init__(self, freq_str, data_frequency):
if freq_str not in self.SUPPORTED_FREQUENCIES:
raise ValueError(
"history frequency must be in {supported}".format(
supported=self.SUPPORTED_FREQUENCIES,
))
# The string the at the algoscript specifies.
# Hold onto to use a key for caching.
self.freq_str = freq_str
# num - The number of units of the frequency.
# unit_str - The unit type, e.g. 'd'
self.num, self.unit_str = parse_freq_str(freq_str)
self.data_frequency = data_frequency
def next_window_start(self, previous_window_close):
"""
Get the first minute of the window starting after a window that
finished on @previous_window_close.
"""
if self.unit_str == 'd':
return self.next_day_window_start(previous_window_close,
self.data_frequency)
elif self.unit_str == 'm':
return self.next_minute_window_start(previous_window_close)
@staticmethod
def next_day_window_start(previous_window_close, data_frequency='minute'):
"""
Get the next day window start after @previous_window_close. This is
defined as the first market open strictly greater than
@previous_window_close.
"""
env = trading.environment
if data_frequency == 'daily':
next_open = env.next_trading_day(previous_window_close)
else:
next_open = env.next_market_minute(previous_window_close)
return next_open
@staticmethod
def next_minute_window_start(previous_window_close):
"""
Get the next minute window start after @previous_window_close. This is
defined as the first market minute strictly greater than
@previous_window_close.
"""
env = trading.environment
return env.next_market_minute(previous_window_close)
def window_open(self, window_close):
"""
        For a period ending on `window_close`, calculate the date of the first
minute bar that should be used to roll a digest for this frequency.
"""
if self.unit_str == 'd':
return self.day_window_open(window_close, self.num)
elif self.unit_str == 'm':
return self.minute_window_open(window_close, self.num)
def window_close(self, window_start):
"""
For a period starting on `window_start`, calculate the date of the last
minute bar that should be used to roll a digest for this frequency.
"""
if self.unit_str == 'd':
return self.day_window_close(window_start, self.num)
elif self.unit_str == 'm':
return self.minute_window_close(window_start, self.num)
def day_window_open(self, window_close, num_days):
"""
Get the first minute for a daily window of length @num_days with last
minute @window_close. This is calculated by searching backward until
@num_days market_closes are encountered.
"""
env = trading.environment
open_ = env.open_close_window(
window_close,
1,
offset=-(num_days - 1)
).market_open.iloc[0]
if self.data_frequency == 'daily':
open_ = pd.tslib.normalize_date(open_)
return open_
def minute_window_open(self, window_close, num_minutes):
"""
Get the first minute for a minutely window of length @num_minutes with
last minute @window_close.
This is defined as window_close if num_minutes == 1, and otherwise as
        the (N-1)st market minute before @window_close.
"""
if num_minutes == 1:
# Short circuit this case.
return window_close
env = trading.environment
return env.market_minute_window(window_close, count=-num_minutes)[-1]
def day_window_close(self, window_start, num_days):
"""
Get the window close for a daily frequency.
        If the data_frequency is minute, then this will be the last minute of
        the last day of the window.
        If the data_frequency is daily, this will be midnight UTC of the last
day of the window.
"""
env = trading.environment
if self.data_frequency != 'daily':
return env.get_open_and_close(
env.add_trading_days(num_days - 1, window_start),
)[1]
return pd.tslib.normalize_date(
env.add_trading_days(num_days - 1, window_start),
)
def minute_window_close(self, window_start, num_minutes):
"""
Get the last minute for a minutely window of length @num_minutes with
first minute @window_start.
This is defined as window_start if num_minutes == 1, and otherwise as
the N-1st market minute after @window_start.
"""
if num_minutes == 1:
# Short circuit this case.
return window_start
env = trading.environment
return env.market_minute_window(window_start, count=num_minutes)[-1]
@with_environment()
def prev_bar(self, dt, env=None):
"""
Returns the previous bar for dt.
"""
if self.unit_str == 'd':
if self.data_frequency == 'minute':
def func(dt):
return env.get_open_and_close(
env.previous_trading_day(dt))[1]
else:
func = env.previous_trading_day
else:
func = env.previous_market_minute
# Cache the function dispatch.
self.prev_bar = func
return func(dt)
@property
def max_bars(self):
if self.data_frequency == 'daily':
return self.max_days
else:
return self.max_minutes
@property
def max_days(self):
if self.data_frequency != 'daily':
raise ValueError('max_days requested in minute mode')
return self.MAX_DAYS[self.unit_str] * self.num
@property
def max_minutes(self):
"""
The maximum number of minutes required to roll a bar at this frequency.
"""
if self.data_frequency != 'minute':
raise ValueError('max_minutes requested in daily mode')
return self.MAX_MINUTES[self.unit_str] * self.num
def normalize(self, dt):
if self.data_frequency != 'daily':
return dt
return pd.tslib.normalize_date(dt)
def __eq__(self, other):
return self.freq_str == other.freq_str
def __hash__(self):
return hash(self.freq_str)
def __repr__(self):
return ''.join([str(self.__class__.__name__),
"('", self.freq_str, "')"])
class HistorySpec(object):
"""
Maps to the parameters of the history() call made by the algoscript
An object is used here so that get_history calls are not constantly
parsing the parameters and provides values for caching and indexing into
result frames.
"""
FORWARD_FILLABLE = frozenset({'price'})
@classmethod
def spec_key(cls, bar_count, freq_str, field, ffill):
"""
Used as a hash/key value for the HistorySpec.
"""
return "{0}:{1}:{2}:{3}".format(
bar_count, freq_str, field, ffill)
def __init__(self, bar_count, frequency, field, ffill,
data_frequency='daily'):
# Number of bars to look back.
self.bar_count = bar_count
if isinstance(frequency, str):
frequency = Frequency(frequency, data_frequency)
if frequency.unit_str == 'm' and data_frequency == 'daily':
raise IncompatibleHistoryFrequency(
frequency=frequency.unit_str,
data_frequency=data_frequency,
)
# The frequency at which the data is sampled.
self.frequency = frequency
# The field, e.g. 'price', 'volume', etc.
self.field = field
# Whether or not to forward fill nan data. Only has an effect if this
# spec's field is in FORWARD_FILLABLE.
self._ffill = ffill
# Calculate the cache key string once.
self.key_str = self.spec_key(
bar_count, frequency.freq_str, field, ffill)
@property
def ffill(self):
"""
Wrapper around self._ffill that returns False for fields which are not
forward-fillable.
"""
return self._ffill and self.field in self.FORWARD_FILLABLE
def __repr__(self):
return ''.join([self.__class__.__name__, "('", self.key_str, "')"])
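# --- illustrative sketch (added for exposition; not part of the original
# module). How a spec lines up with its cache key, assuming minute-mode data:
#
#   >>> spec = HistorySpec(bar_count=10, frequency='1m', field='price',
#   ...                    ffill=True, data_frequency='minute')
#   >>> spec.key_str
#   '10:1m:price:True'
#   >>> spec.ffill   # 'price' is in FORWARD_FILLABLE
#   True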
def days_index_at_dt(history_spec, algo_dt):
"""
Get the index of a frame to be used for a get_history call with daily
frequency.
"""
env = trading.environment
# Get the previous (bar_count - 1) days' worth of market closes.
day_delta = (history_spec.bar_count - 1) * history_spec.frequency.num
market_closes = env.open_close_window(
algo_dt,
day_delta,
offset=(-day_delta),
step=history_spec.frequency.num,
).market_close
if history_spec.frequency.data_frequency == 'daily':
market_closes = market_closes.apply(pd.tslib.normalize_date)
# Append the current algo_dt as the last index value.
# Using the 'rawer' numpy array values here because of a bottleneck
# that appeared when using DatetimeIndex
return np.append(market_closes.values, algo_dt)
def minutes_index_at_dt(history_spec, algo_dt):
"""
Get the index of a frame to be used for a get_history_call with minutely
frequency.
"""
# TODO: This is almost certainly going to be too slow for production.
env = trading.environment
return env.market_minute_window(
algo_dt,
history_spec.bar_count,
step=-1,
)[::-1]
def index_at_dt(history_spec, algo_dt):
"""
Returns index of a frame returned by get_history() with the given
history_spec and algo_dt.
The resulting index will have @history_spec.bar_count bars, increasing in
units of @history_spec.frequency, terminating at the given @algo_dt.
Note: The last bar of the returned frame represents an as-of-yet incomplete
time window, so the delta between the last and second-to-last bars is
    usually less than `@history_spec.frequency` for frequencies greater
than 1m.
"""
frequency = history_spec.frequency
if frequency.unit_str == 'd':
return days_index_at_dt(history_spec, algo_dt)
elif frequency.unit_str == 'm':
return minutes_index_at_dt(history_spec, algo_dt)
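# Illustrative note (added for exposition): for a '1d' HistorySpec with
# bar_count=3, the returned index holds the two previous market closes
# followed by algo_dt itself, which stands in for the still-incomplete
# current bar.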
| apache-2.0 |
RPGOne/Skynet | scikit-learn-0.18.1/examples/linear_model/plot_ard.py | 21 | 2828 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
xavierwu/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
bmazin/ARCONS-pipeline | legacy/arcons_control/lib/pulses_v1.py | 1 | 21557 |
# encoding: utf-8
"""
pulses.py
Created by Ben Mazin on 2011-05-04.
Copyright (c) 2011 . All rights reserved.
"""
import numpy as np
import time
import os
from tables import *
import matplotlib
import scipy as sp
import scipy.signal
from matplotlib.pyplot import plot, figure, show, rc, grid
import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
import mpfit
#import numexpr
#from iqsweep import *
class Photon(IsDescription):
"""The pytables derived class that holds pulse packet data on the disk.
Put in a marker pulse with at = int(time.time()) and phase = -32767 every second.
"""
at = UInt32Col() # pulse arrival time in microseconds since last sync pulse
# phase = Int16Col() # optimally filtered phase pulse height
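# --- illustrative sketch (added for exposition; not part of the original
# module). Assuming an open pytables file ``h5f``, a once-per-second timing
# marker could be appended to a Photon table along these lines (the -32767
# phase marker mentioned in the docstring is omitted because the ``phase``
# column is commented out above):
#
#   ptable = h5f.createTable('/', 'photons', Photon, "photon packets")
#   row = ptable.row
#   row['at'] = int(time.time())
#   row.append()
#   ptable.flush()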
class RawPulse(IsDescription):
"""The pytables derived class that hold raw pulse data on the disk.
"""
starttime = Float64Col() # start time of pulse data
samprate = Float32Col() # sample rate of the data in samples/sec
npoints = Int32Col() # number of data points in the pulse
f0 = Float32Col() # resonant frequency data was taken at
atten1 = Float32Col() # attenuator 1 setting data was taken at
atten2 = Float32Col() # attenuator 2 setting data was taken at
Tstart = Float32Col() # temp data was taken at
    I = Float32Col(2000)    # I pulse data, up to 2000 points.
Q = Float32Col(2000)
class PulseAnalysis(IsDescription): # contains final template info
flag = Int16Col() # flag for quality of template. If this could be a bad template set > 0
count = Float32Col() # number of pulses going into this template
pstart = Int16Col() # index of peak of template
phasetemplate = Float64Col(2000)
phasenoise = Float64Col(800)
phasenoiseidx = Float64Col(800)
#optfilt = Complex128(800)
# fit quantities
trise = Float32Col() # fit value of rise time
tfall = Float32Col() # fit value of fall time
# optimal filter parameters
coeff = Float32Col(100) # coefficients for the near-optimal filter
nparam = Int16Col() # number of parameters in the filter
class BeamMap(IsDescription):
roach = UInt16Col() # ROACH board number (0-15) for now!
resnum = UInt16Col() # resonator number on roach board (corresponds to res # in optimal pulse packets)
f0 = Float32Col() # resonant frequency of center of sweep (can be used to get group name)
pixel = UInt32Col() # actual pixel number - bottom left of array is 0, increasing up
xpos = Float32Col() # physical X location in mm
ypos = Float32Col() # physical Y location in mm
scale = Float32Col(3) # polynomial to convert from degrees to eV
class ObsHeader(IsDescription):
target = StringCol(80)
datadir = StringCol(80) # directory where observation data is stored
calfile = StringCol(80) # path and filename of calibration file
beammapfile = StringCol(80) # path and filename of beam map file
version = StringCol(80)
instrument = StringCol(80)
telescope = StringCol(80)
focus = StringCol(80)
parallactic = Float64Col()
ra = Float64Col()
dec = Float64Col()
alt = Float64Col()
az = Float64Col()
airmass = Float64Col()
equinox = Float64Col()
epoch = Float64Col()
obslat = Float64Col()
obslong = Float64Col()
obsalt = Float64Col()
timezone = Int32Col()
localtime = StringCol(80)
ut = Float64Col()
lst = StringCol(80)
jd = Float64Col()
platescl = Float64Col()
exptime = Int32Col()
# Make a fake observation file
def FakeObservation(obsname, start, exptime):
# simulation parameters
nroach = 4 # number of roach boards
nres = 256 # number of pixels on each roach
xpix = 32 # pixels in x dir
ypix = 32 # pixels in y dir
R = 15 # mean energy resolution
good = 0.85 # fraction of resonators that are good
#exptime = 10 # duration of fake exposure in seconds
fullobspath = obsname.split("/")
obsfile = fullobspath.pop()
obspath = "/".join(fullobspath)+"/"
h5file = openFile(obsname, mode = "r")
carray = h5file.root.beammap.beamimage.read()
h5file.close()
    filt1 = Filters(complevel=1, complib='zlib', fletcher32=False) # without minimal compression the file sizes are ridiculous...
h5file = openFile(obsname, mode = "a")
''' beam map inserted from beam map file during header gen
# make beamap table
bgroup = h5file.createGroup('/','beammap','Beam Map of Array')
filt = Filters(complevel=0, complib='zlib', fletcher32=False)
filt1 = Filters(complevel=1, complib='blosc', fletcher32=False) # without minimal compression the files sizes are ridiculous...
btable = h5file.createTable(bgroup, 'beammap', BeamMap, "Table of anaylzed beam map data",filters=filt1)
w = btable.row
# make beammap array - this is a 2d array (top left is 0,0. first index is column, second is row) containing a string with the name of the group holding the photon data
ca = h5file.createCArray(bgroup, 'beamimage', StringAtom(itemsize=40), (32,32), filters=filt1)
for i in xrange(nroach):
for j in xrange(nres):
w['roach'] = i
w['resnum'] = ((41*j)%256)
w['f0'] = 3.5 + (i%2)*.512 + 0.002*j
w['pixel'] = ((41*j)%256) + 256*i
w['xpos'] = np.floor(j/16)*0.1
w['ypos'] = (j%16)*0.1
if i == 1 or i == 3:
w['ypos'] = (j%16)*0.1 + 1.6
if i == 2 or i == 3:
w['xpos'] = np.floor(j/16)*0.1 + 1.6
w.append()
colidx = int(np.floor(j/16))
rowidx = 31 - j%16
if i == 1 or i == 3:
rowidx -= 16
if i >= 2:
colidx += 16
ca[rowidx,colidx] = 'r'+str(i)+'/p'+str( ((41*j)%256) )
h5file.flush()
carray = ca.read()
'''
# load up the 32x32 image we want to simulate
sourceim = plt.imread('/Users/ourhero/Documents/python/MazinLab/Arcons/ucsblogo.png')
sourceim = sourceim[:,:,0]
# make directory structure for pulse data
dptr = []
for i in xrange(nroach):
group = h5file.createGroup('/','r'+str(i),'Roach ' + str(i))
for j in xrange(nres):
subgroup = h5file.createGroup(group,'p'+str(j))
dptr.append(subgroup)
'''
# now go in an update the beamimages array to contain the name of the actual data array
for i in xrange(32):
for j in xrange(32):
name = h5file.getNode('/',name=ca[i,j])
for leaf in name._f_walkNodes('Leaf'):
newname = ca[i,j]+'/'+leaf.name
ca[i,j] = newname
'''
# create fake photon data
#start = np.floor(time.time())
# make VLArray tables for the photon data
vlarr=[]
for i in dptr:
tmpvlarr = h5file.createVLArray(i, 't'+str(int(start)), UInt32Atom(shape=()),expectedsizeinMB=0.1,filters=filt1)
vlarr.append(tmpvlarr)
idx = np.arange(2000)
for i in xrange(exptime):
print i
t1 = time.time()
for j in vlarr:
# sky photons
nphot = 1000 + int(np.random.randn()*np.sqrt(1000))
#arrival = np.uint32(idx[:nphot]*700.0 + np.random.randn(nphot)*100.0)
arrival = np.uint64(np.random.random(nphot)*1e6)
energy = np.uint64(np.round((20.0 + np.random.random(nphot)*80.0)*20.0))
photon = np.bitwise_or( np.left_shift(energy,12), arrival )
# source photons
# figure out where this group is on the array
pgroup = j._g_getparent().__str__()
#print "printing pgroup", pgroup
ngroup = (pgroup.split(' '))[0]+'/t'+str(start)
#print "printing ngroup", ngroup
cidx = np.where(carray == ngroup[1:])
#print "printing ngroup 1:" ,ngroup[1:]
#print "printing cidx", cidx
#print sourceim[cidx]
sphot = 100.0 * (sourceim[cidx])[0]
sphot += np.sqrt(sphot)*np.random.randn()
sphot = np.uint32(sphot)
#print sphot
if sphot >= 1.0:
arrival = np.uint64(np.random.random(sphot)*1e6)
energy = np.uint64( (60.0 + np.random.randn(sphot)*3.0)*20.0 )
source = np.bitwise_or( np.left_shift(energy,12), arrival )
plist = np.concatenate((photon,source))
else:
plist = photon
#splist = np.sort(plist)
j.append(plist)
t2 = time.time()
dt = t2-t1
if t2-t1 < 1:
#delay for 1 second between creating seconds of false data
time.sleep(1-dt)
'''
idx = np.arange(2000)
for i in xrange(exptime):
print i
t1 = time.time()
for j in vlarr:
# sky photons
nphot = 1000 + int(np.random.randn()*np.sqrt(1000))
#arrival = np.uint32(idx[:nphot]*700.0 + np.random.randn(nphot)*100.0)
arrival = np.uint32(np.random.random(nphot)*1e6)
energy = np.uint32(np.round((20.0 + np.random.random(nphot)*80.0)*20.0))
photon = np.bitwise_or( np.left_shift(arrival,12), energy )
# source photons
# figure out where this group is on the array
pgroup = j._g_getparent().__str__()
ngroup = (pgroup.split(' '))[0]
cidx = np.where(carray == ngroup[1:])
#print sourceim[cidx]
sphot = 100.0 * (sourceim[cidx])[0]
sphot += np.sqrt(sphot)*np.random.randn()
sphot = np.uint32(sphot)
#print sphot
if sphot >= 1.0:
arrival = np.uint32(np.random.random(sphot)*1e6)
energy = np.uint32( (60.0 + np.random.randn(sphot)*3.0)*20.0 )
source = np.bitwise_or( np.left_shift(arrival,12), energy )
plist = np.concatenate((photon,source))
else:
plist = photon
#splist = np.sort(plist)
j.append(plist)
'''
h5file.close()
# make a preview image from obsfile
def QuickLook(obsfile,tstart,tend):
h5file = openFile(obsfile, mode = "r")
image = np.zeros((32,32))
#mask = np.repeat(np.uint32(4095),2000)
# load beamimage
bmap = h5file.root.beammap.beamimage
for i in xrange(32):
for j in xrange(32):
photons = h5file.root._f_getChild(bmap[i][j])
for k in range(tstart,tend):
#energy = np.bitwise_and( mask[:len(photons[0])],photons[0])
image[i][j] += len(photons[k])
# subtract off sky
skysub = np.float32(image - np.median(image))
h5file.close()
# display the image
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.imshow(skysub,cmap='gray', interpolation='nearest')
cbar = fig.colorbar(cax)
plt.show()
# Make a pulse template from the pulses saved in filename
def MakeTemplate(pulsedat):
# open the pulse file
h5file = openFile(pulsedat, mode = "r")
r1 = h5file.root.r1
# create the template file
tfile = openFile(pulsedat.replace('.h5','-template.h5'), mode = "w", title = "Optimal filter data file created " + time.asctime() )
tempr1 = tfile.createGroup('/','r1','ROACH 1')
# loop through pulse data
for group in r1._f_walkGroups():
if group == r1: # walkgroups returns itself as first entry, so skip it - there is probably a more elegant way!
continue
print group
# go through all the raw pulses in table and generate the template
tP=np.zeros(2000,dtype='float64')
tA=np.zeros(2000,dtype='float64')
tPf=np.zeros(2000,dtype='float64')
tAf=np.zeros(2000,dtype='float64')
noise = np.zeros(800,dtype='float64')
# read the table into memory (too slow otherwise!)
dat = group.iqpulses.read()
N = len(dat)
count = 0.0
peaklist = []
idx = np.arange(2000)*2.0
fitidx = np.concatenate((idx[:900],idx[1800:]))
# center of loop
xc = 0.0
yc = 0.0
# determine median prepulse levels for first 100 pulses
I1m = np.median(dat['I'][:100,:900])
Q1m = np.median(dat['Q'][:100,:900])
# make a prelimiary template with 1000 pulses, then a better one with all of them
if N > 1000:
N = 1000
# first pass
for j in xrange(N):
I = dat['I'][j]
Q = dat['Q'][j]
# reference all pulses to first 100 pulses (1/f removal)
I += (I1m - np.median(I[1:900]))
Q += (Q1m - np.median(Q[1:900]))
# transform to phase
P1 = np.arctan2( Q-yc, I-xc )
#P1 = numexpr.evaluate('arctan2( Q-yc, I-xc )')
# remove phase wraps and convert to degrees
P2 = np.rad2deg(np.unwrap(P1))
# subtract baseline
fit = np.poly1d(np.polyfit(fitidx,np.concatenate((P2[:900],P2[1800:])),1))
P3 = P2 - fit(idx)
# skip pulses with bad baseline subtraction
stdev = np.std(P3[:100])
if np.abs(np.mean(P3[:100])-np.mean(P3[1900:])) > stdev*2.0 :
continue
# eliminate doubles
# first pass fit all non-noise pulses
peak = np.max(P3[980:1050])
peaklist.append(peak)
if peak < 15.0 or peak > 120.0:
continue
# if peak not near the center skip
ploc = (np.where(P3 == peak))[0]
if ploc < 980 or ploc > 1020:
continue
# align pulse so peak happens at center
P4 = np.roll(P3,1000-ploc)
# normalize and add to template
tP += P4/np.max(P4)
count += 1
print 'First Pass =',int(count),'pulses'
tP /= count
tA /= count
# make a second pass through using the initial template as the kernel to determine pulse start time
peaklist = np.asarray(peaklist)
pm = np.median(peaklist[np.where(peaklist>15)])
pdev = np.std(peaklist[np.where(peaklist>15)])
print pm,'+-',pdev,'degrees'
N = len(dat)
count = 0.0
t1 = time.time()
for j in xrange(N):
I = dat['I'][j]
Q = dat['Q'][j]
# reference all pulses to first 100 pulses (1/f removal)
I += (I1m - np.median(I[1:900]))
Q += (Q1m - np.median(Q[1:900]))
# transform to phase
P1 = np.arctan2( Q-yc, I-xc )
# remove phase wraps and convert to degrees
P2 = np.rad2deg(np.unwrap(P1))
# subtract baseline - this step is slow - speed up!
fit = np.poly1d(np.polyfit(fitidx,np.concatenate((P2[:900],P2[1800:])),1))
P3 = P2 - fit(idx)
# skip pulses with bad baseline subtraction
stdev = np.std(P3[:100])
if np.abs(np.mean(P3[:100])-np.mean(P3[1900:])) > stdev*2.0 :
continue
# eliminate doubles
# Only fit pulses near the peak
conv = np.convolve(tP[900:1500],P3)
#conv = scipy.signal.fftconvolve(tP[950:1462],np.concatenate( (P3,P3[0:48]) ) )
ploc = int((np.where(conv == np.max(conv)))[0] - 1160.0)
peak = np.max(P3[1000+ploc])
#print ploc,peak
if peak < pm - 4.0*pdev or peak > pm + 4.0*pdev:
continue
# if peak not near the center skip
if ploc < -30 or ploc > 30:
continue
# align pulse so peak happens at center
P4 = np.roll(P3,-ploc)
# normalize and add to template
tPf += P4/np.max(P4)
count += 1
# compute noise PSD
noise += np.abs( np.fft.fft(np.deg2rad(P4[50:850])) )**2
t2 = time.time()
tPf /= count
noise /= count
noiseidx = np.fft.fftfreq(len(noise),d=0.000002)
print 'Second Pass =',int(count),'pulses'
print 'Pulses per second = ', N/(t2-t1)
# calculate optimal filter parameters
# save the template information in a new file
        # create a group off root for each resonator that contains iq sweep, pulse template, noise, and optimal filter coefficients
pgroup = tfile.createGroup(tempr1,group._v_name, 'data to set up optimal filtering' )
group.iqsweep.copy(newparent=pgroup) # copy in IQ sweep data
#filt = Filters(complevel=5, complib='zlib', fletcher32=True)
filt = Filters(complevel=0, complib='zlib', fletcher32=False)
table = tfile.createTable(pgroup, 'opt', PulseAnalysis, "optimal filter data",filters=filt)
w = table.row
if( count < 500 or pm < 10 or pm > 150):
w['flag'] = 1
else:
w['flag'] = 0
w['count'] = count
w['pstart'] = (np.where( tPf == np.max(tPf)))[0]
w['phasetemplate'] = tPf
w['phasenoise'] = noise
w['phasenoiseidx'] = noiseidx
w.append()
break
#plot(tPf)
plot(noiseidx,noise)
show()
h5file.close()
tfile.close()
def FakeTemplateData(): # make fake data and write it to a h5 file
filename = '/Users/bmazin/Data/Projects/pytest/fakepulse2.h5'
h5file = openFile(filename, mode='w', title = "Fake Pulse file created " + time.asctime() )
r1 = h5file.createGroup('/','r1','ROACH 1')
# open IQ sweep file
sweepdat = '/Users/bmazin/Data/Projects/pytest/ps_20110505-172336.h5'
iqfile = openFile(sweepdat, mode = "r")
swp = iqfile.root.sweeps
# loop through each IQ sweep in sweepddat and create fake pulses for it
for group in swp._f_walkGroups():
if group == swp: # walkgroups returns itself as first entry, so skip it - there is probably a more elegant way!
continue
print group
pgroup = h5file.createGroup(r1,group._v_name, 'IQ pulse data' )
pname = 'iqpulses'
#filt = Filters(complevel=5, complib='zlib', fletcher32=True)
filt = Filters(complevel=0, complib='zlib', fletcher32=False)
table = h5file.createTable(pgroup, pname, RawPulse, "IQ Pulse Data",filters=filt)
p = table.row
# copy the IQ sweep data into the file
group._f_copyChildren(pgroup)
trise = 0.1
tfall = 65.0
for j in xrange(1000):
p['starttime'] = time.time()
p['samprate'] = 500000.0
p['npoints'] = 2000
p['f0'] = 3.65
p['atten1'] = 30
p['atten2'] = 0
p['Tstart'] = 0.1
I = np.zeros(2000)
Q = np.zeros(2000)
idx = np.arange(1000,dtype='float32')
I[1000:2000] = (1.0 - np.exp( -idx/trise ) ) * np.exp(-idx/tfall) * 0.25
Q[1000:2000] = (1.0 - np.exp( -idx/trise ) ) * np.exp(-idx/tfall)
I += 2.0 - np.random.normal(size=2000)*.01 # add noise
Q += np.random.normal(size=2000)*.01
# move arrival time
I = np.roll(I, int((np.random.normal()*10.0)+0.5) )
Q = np.roll(Q, int((np.random.normal()*10.0)+0.5) )
p['I'] = np.concatenate( (I,np.zeros(2000-len(I))),axis=0 )
p['Q'] = np.concatenate( (Q,np.zeros(2000-len(Q))),axis=0 )
p.append()
table.flush()
h5file.close()
iqfile.close()
#print 'Running!'
#FakeTemplateData()
#pulsedat = '/Users/bmazin/Data/Projects/pytest/fakepulse2.h5'
#MakeTemplate(pulsedat)
#fakedat = '/Users/bmazin/Data/Projects/pytest/fakeobs.h5'
#FakeObservation(fakedat)
#QuickLook(fakedat,0,10)
#print 'Done.'
| gpl-2.0 |
hakonsbm/nest-simulator | examples/neuronview/neuronview.py | 2 | 10690 | # -*- coding: utf-8 -*-
#
# neuronview.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import pygtk
pygtk.require('2.0')
import gtk # noqa
import pango # noqa
import gobject # noqa
from matplotlib.figure import Figure # noqa
from matplotlib.backends.backend_gtkagg import \
FigureCanvasGTKAgg as FigureCanvas # noqa
import matplotlib.gridspec as gridspec # noqa
import os # noqa
import nest # noqa
default_neuron = "iaf_psc_alpha"
default_stimulator = "dc_generator"
class Main():
def __init__(self):
self._gladefile = "neuronview.glade"
self._builder = gtk.Builder()
self._builder.add_from_file(self._gladefile)
self._builder.connect_signals(self)
self._win = self._builder.get_object("mainwindow")
self._win.resize(900, 700)
box = self._builder.get_object("box5")
self._stimulatordictview = DictView()
self._builder.get_object("scrolledwindow2").add(
self._stimulatordictview)
box = self._builder.get_object("box4")
self._neurondictview = DictView()
self._builder.get_object("scrolledwindow3").add(self._neurondictview)
self.populate_comboboxes()
self._figure = Figure(figsize=(5, 4), dpi=100)
canvas = FigureCanvas(self._figure)
canvas.set_size_request(200, 250)
canvas.show()
box = self._builder.get_object("box3")
bg_style = box.get_style().bg[gtk.STATE_NORMAL]
gtk_color = (bg_style.red_float, bg_style.green_float,
bg_style.blue_float)
self._figure.set_facecolor(gtk_color)
box.pack_start(canvas)
self._win.show()
gtk.main()
def update_figure(self, spikes, potentials):
if nest.GetKernelStatus("time") != 0.0:
self._figure.clear()
gs = gridspec.GridSpec(2, 1, height_ratios=[1, 4])
ax0 = self._figure.add_subplot(gs[0])
ax0.plot(spikes[0]["times"], [1] * len(spikes[0]["times"]), ".")
ax0.set_yticks([])
ax0.set_xticks([])
ax1 = self._figure.add_subplot(gs[1])
ax1.plot(potentials[0]["times"], potentials[0]["V_m"], "r-")
ax1.set_ylabel("$V_m$ (mV)")
ax1.set_xlabel("time (s)")
# plt.tight_layout()
self._figure.canvas.draw()
def filter_statusdict(self, params):
for key in ["archiver_length", "available", "capacity",
"elementsize", "frozen", "global_id",
"instantiations", "is_refractory", "local",
"model", "element_type", "offset", "origin",
"receptor_types", "recordables",
"refractory_input", "rmax", "state", "t_spike",
"thread", "tlast", "tspike", "type_id", "vp",
"ymod"]:
if key in params.keys():
params.pop(key)
def populate_comboboxes(self):
neuronmodels = self._builder.get_object("neuronmodels")
neuronmodelsliststore = neuronmodels.get_model()
stimulatormodels = self._builder.get_object("stimulatormodels")
stimulatormodelsliststore = stimulatormodels.get_model()
neuron_it = None
stimulator_it = None
models = nest.Models("nodes")
models = [x for x in models if
x not in ["correlation_detector", "sli_neuron",
"iaf_psc_alpha_norec", "parrot_neuron",
"parrot_neuron_ps"]]
for entry in models:
try:
entrytype = nest.GetDefaults(entry)["element_type"]
except:
entrytype = "unknown"
if entrytype == "neuron":
it = neuronmodelsliststore.append([entry])
if entry == default_neuron:
neuron_it = it
elif entrytype == "stimulator":
it = stimulatormodelsliststore.append([entry])
if entry == default_stimulator:
stimulator_it = it
cell = gtk.CellRendererText()
neuronmodels.pack_start(cell, True)
neuronmodels.add_attribute(cell, 'text', 0)
neuronmodels.set_active_iter(neuron_it)
stimulatormodels.pack_start(cell, True)
stimulatormodels.add_attribute(cell, 'text', 0)
stimulatormodels.set_active_iter(stimulator_it)
docviewcombo = self._builder.get_object("docviewcombo")
docviewcomboliststore = docviewcombo.get_model()
docviewcomboliststore.append(["Stimulating device"])
it = docviewcomboliststore.append(["Neuron"])
docviewcombo.pack_start(cell, True)
docviewcombo.add_attribute(cell, 'text', 0)
docviewcombo.set_active_iter(it)
def get_help_text(self, name):
nest.ll_api.sli_run("statusdict /prgdocdir get")
docdir = nest.ll_api.sli_pop()
helptext = "No documentation available"
for subdir in ["cc", "sli"]:
filename = os.path.join(docdir, "help", subdir, name + ".hlp")
if os.path.isfile(filename):
helptext = open(filename, 'r').read()
return helptext
def on_model_selected(self, widget):
liststore = widget.get_model()
model = liststore.get_value(widget.get_active_iter(), 0)
statusdict = nest.GetDefaults(model)
self.filter_statusdict(statusdict)
if widget == self._builder.get_object("neuronmodels"):
self._neurondictview.set_params(statusdict)
if widget == self._builder.get_object("stimulatormodels"):
self._stimulatordictview.set_params(statusdict)
self.on_doc_selected(self._builder.get_object("docviewcombo"))
def on_doc_selected(self, widget):
liststore = widget.get_model()
doc = liststore.get_value(widget.get_active_iter(), 0)
docview = self._builder.get_object("docview")
docbuffer = gtk.TextBuffer()
if doc == "Neuron":
combobox = self._builder.get_object("neuronmodels")
if doc == "Stimulating device":
combobox = self._builder.get_object("stimulatormodels")
liststore = combobox.get_model()
model = liststore.get_value(combobox.get_active_iter(), 0)
docbuffer.set_text(self.get_help_text(model))
docview.set_buffer(docbuffer)
docview.modify_font(pango.FontDescription("monospace 10"))
def on_simulate_clicked(self, widget):
nest.ResetKernel()
combobox = self._builder.get_object("stimulatormodels")
liststore = combobox.get_model()
stimulatormodel = liststore.get_value(combobox.get_active_iter(), 0)
params = self._stimulatordictview.get_params()
stimulator = nest.Create(stimulatormodel, params=params)
combobox = self._builder.get_object("neuronmodels")
liststore = combobox.get_model()
neuronmodel = liststore.get_value(combobox.get_active_iter(), 0)
neuron = nest.Create(neuronmodel,
params=self._neurondictview.get_params())
weight = self._builder.get_object("weight").get_value()
delay = self._builder.get_object("delay").get_value()
nest.Connect(stimulator, neuron, weight, delay)
sd = nest.Create("spike_detector", params={"record_to": ["memory"]})
nest.Connect(neuron, sd)
vm = nest.Create("voltmeter", params={"record_to": ["memory"],
"interval": 0.1})
nest.Connect(vm, neuron)
simtime = self._builder.get_object("simtime").get_value()
nest.Simulate(simtime)
self.update_figure(nest.GetStatus(sd, "events"),
nest.GetStatus(vm, "events"))
def on_delete_event(self, widget, event):
self.on_quit(widget)
return True
def on_quit(self, project):
self._builder.get_object("mainwindow").hide()
gtk.main_quit()
class DictView(gtk.TreeView):
def __init__(self, params=None):
gtk.TreeView.__init__(self)
if params:
self.params = params
self.repopulate()
renderer = gtk.CellRendererText()
column = gtk.TreeViewColumn("Name", renderer, text=1)
self.append_column(column)
renderer = gtk.CellRendererText()
renderer.set_property("mode", gtk.CELL_RENDERER_MODE_EDITABLE)
renderer.set_property("editable", True)
column = gtk.TreeViewColumn("Value", renderer, text=2)
self.append_column(column)
self.set_size_request(200, 150)
renderer.connect("edited", self.check_value)
self.show()
def repopulate(self):
model = gtk.TreeStore(gobject.TYPE_PYOBJECT, gobject.TYPE_STRING,
gobject.TYPE_STRING)
for key in sorted(self.params.keys()):
pos = model.insert_after(None, None)
data = {"key": key, "element_type": type(self.params[key])}
model.set_value(pos, 0, data)
model.set_value(pos, 1, str(key))
model.set_value(pos, 2, str(self.params[key]))
self.set_model(model)
def check_value(self, widget, path, new_text):
model = self.get_model()
data = model[path][0]
try:
typename = data["element_type"].__name__
new_value = eval("%s('%s')" % (typename, new_text))
if typename == "bool" and new_text.lower() in ["false", "0"]:
new_value = False
self.params[data["key"]] = new_value
model[path][2] = str(new_value)
except ValueError:
old_value = self.params[data["key"]]
model[path][2] = str(old_value)
def get_params(self):
return self.params
def set_params(self, params):
self.params = params
self.repopulate()
if __name__ == "__main__":
Main()
| gpl-2.0 |
shusenl/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is never
more than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
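# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original example): a minimal, hypothetical
# re-implementation of the label/length sampling described in the module
# docstring, shown only to make the rejection-sampling steps concrete.  The
# function name and defaults are invented for illustration, word sampling is
# omitted, and the script below never calls it.
def _sketch_label_and_length_sampling(n_labels=1, length=50, n_classes=2,
                                      allow_unlabeled=True, seed=0):
    rng = np.random.RandomState(seed)
    # pick the number of labels: n ~ Poisson(n_labels), rejecting draws that
    # exceed the number of classes (and n == 0 when unlabeled samples are not
    # allowed)
    n = rng.poisson(n_labels)
    while n > n_classes or (not allow_unlabeled and n == 0):
        n = rng.poisson(n_labels)
    # the n classes are drawn without replacement, i.e. classes that have
    # already been chosen are rejected
    classes = rng.choice(n_classes, size=n, replace=False)
    # pick the document length: k ~ Poisson(length), rejecting k == 0
    k = 0
    while k == 0:
        k = rng.poisson(length)
    return classes, k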
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
davidastephens/zipline | tests/test_examples.py | 3 | 1444 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code is based on a unittest written by John Salvatier:
# https://github.com/pymc-devs/pymc/blob/pymc3/tests/test_examples.py
# Disable plotting
#
import matplotlib
matplotlib.use('Agg')
from os import path
import os
import fnmatch
import imp
def test_examples():
os.chdir(example_dir())
for fname in all_matching_files('.', '*.py'):
yield check_example, fname
def all_matching_files(d, pattern):
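    # Editorial note (not in the original source): os.path.walk is a
    # Python 2-only API (removed in Python 3); a port of this helper to
    # Python 3 would gather matches with os.walk() instead.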
def addfiles(fls, dir, nfiles):
nfiles = fnmatch.filter(nfiles, pattern)
nfiles = [path.join(dir, f) for f in nfiles]
fls.extend(nfiles)
files = []
path.walk(d, addfiles, files)
return files
def example_dir():
import zipline
d = path.dirname(zipline.__file__)
return path.join(path.abspath(d), 'examples/')
def check_example(p):
imp.load_source('__main__', path.basename(p))
| apache-2.0 |
Windy-Ground/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
    # The iris datasets in R and sklearn do not match in a few places; these
    # values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
macks22/scikit-learn | sklearn/covariance/graph_lasso_.py | 127 | 25626 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state, check_array
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
    This results from the bound for all the Lasso problems that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
enet_tol=1e-4, max_iter=100, verbose=False,
return_costs=False, eps=np.finfo(np.float64).eps,
return_n_iter=False):
"""l1-penalized covariance estimator
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows up. Besides, this takes a
    # conservative stand-point on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
# The different l1 regression solver have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
# be robust to the max_iter=0 edge case, see:
# https://github.com/scikit-learn/scikit-learn/issues/4134
d_gap = np.inf
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, enet_tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars', return_path=False)
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
warnings.warn('graph_lasso: did not converge after %i iteration:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
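# Editorial usage sketch (hypothetical data, not part of the original module):
#
#     import numpy as np
#     from sklearn.covariance import empirical_covariance, graph_lasso
#     X = np.random.RandomState(0).randn(60, 10)
#     emp_cov = empirical_covariance(X)
#     cov_, prec_ = graph_lasso(emp_cov, alpha=0.05, mode='cd')
#
# cov_ is the regularized covariance estimate and prec_ its sparse inverse;
# passing return_costs=True additionally returns the (objective, dual gap)
# pairs recorded at each iteration.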
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
alpha : positive float, default 0.01
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
mode : {'cd', 'lars'}, default 'cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, default 1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, default 100
The maximum number of iterations.
verbose : boolean, default False
If verbose is True, the objective function and dual gap are
plotted at each iteration.
assume_centered : boolean, default False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, enet_tol=1e-4,
max_iter=100, verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True)
return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, enet_tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
enet_tol=enet_tol, max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
Parameters
----------
    alphas : integer, or list of positive floats, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
enet_tol : positive float, optional
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
enet_tol=1e-4, max_iter=100, mode='cd', n_jobs=1,
verbose=False, assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.enet_tol = enet_tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
"""Fits the GraphLasso covariance model to X.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol, enet_tol=self.enet_tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float64).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
enet_tol=self.enet_tol, max_iter=self.max_iter,
verbose=inner_verbose, return_n_iter=True)
return self
| bsd-3-clause |
linebp/pandas | pandas/tests/plotting/common.py | 3 | 19473 | #!/usr/bin/env python
# coding: utf-8
import pytest
import os
import warnings
from pandas import DataFrame, Series
from pandas.compat import zip, iteritems
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.api import is_list_like
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_is_valid_plot_return_object)
import numpy as np
from numpy import random
import pandas.plotting as plotting
from pandas.plotting._tools import _flatten
"""
This is a common base class used for various plotting tests
"""
tm._skip_if_no_mpl()
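# Editorial note (not in the original source): concrete plotting test modules
# subclass TestPlotBase and combine its fixtures with the helpers below, along
# the lines of this hypothetical sketch:
#
#     class TestFramePlots(TestPlotBase):
#         def test_line(self):
#             ax = _check_plot_works(self.tdf.plot)
#             self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
#
# setup_method() builds the shared data (iris, hist_df, tdf, hexbin_df) and
# records matplotlib version flags used for version-dependent assertions.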
def _skip_if_no_scipy_gaussian_kde():
try:
from scipy.stats import gaussian_kde # noqa
except ImportError:
pytest.skip("scipy version doesn't support gaussian_kde")
def _ok_for_gaussian_kde(kind):
if kind in ['kde', 'density']:
try:
from scipy.stats import gaussian_kde # noqa
except ImportError:
return False
return True
class TestPlotBase(object):
def setup_method(self, method):
import matplotlib as mpl
mpl.rcdefaults()
self.mpl_le_1_2_1 = plotting._compat._mpl_le_1_2_1()
self.mpl_ge_1_3_1 = plotting._compat._mpl_ge_1_3_1()
self.mpl_ge_1_4_0 = plotting._compat._mpl_ge_1_4_0()
self.mpl_ge_1_5_0 = plotting._compat._mpl_ge_1_5_0()
self.mpl_ge_2_0_0 = plotting._compat._mpl_ge_2_0_0()
self.mpl_ge_2_0_1 = plotting._compat._mpl_ge_2_0_1()
if self.mpl_ge_1_4_0:
self.bp_n_objects = 7
else:
self.bp_n_objects = 8
if self.mpl_ge_1_5_0:
# 1.5 added PolyCollections to legend handler
# so we have twice as many items.
self.polycollection_factor = 2
else:
self.polycollection_factor = 1
if self.mpl_ge_2_0_0:
self.default_figsize = (6.4, 4.8)
else:
self.default_figsize = (8.0, 6.0)
self.default_tick_position = 'left' if self.mpl_ge_2_0_0 else 'default'
# common test data
from pandas import read_csv
base = os.path.join(os.path.dirname(curpath()), os.pardir)
path = os.path.join(base, 'tests', 'data', 'iris.csv')
self.iris = read_csv(path)
n = 100
with tm.RNGContext(42):
gender = np.random.choice(['Male', 'Female'], size=n)
classroom = np.random.choice(['A', 'B', 'C'], size=n)
self.hist_df = DataFrame({'gender': gender,
'classroom': classroom,
'height': random.normal(66, 4, size=n),
'weight': random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(
size=20)})
def teardown_method(self, method):
tm.close()
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
@cache_readonly
def colorconverter(self):
import matplotlib.colors as colors
return colors.colorConverter
def _check_legend_labels(self, axes, labels=None, visible=True):
"""
Check each axes has expected legend labels
Parameters
----------
axes : matplotlib Axes object, or its list-like
labels : list-like
expected legend labels
visible : bool
expected legend visibility. labels are checked only when visible is
True
"""
if visible and (labels is None):
raise ValueError('labels must be specified when visible is True')
axes = self._flatten_visible(axes)
for ax in axes:
if visible:
assert ax.get_legend() is not None
self._check_text_labels(ax.get_legend().get_texts(), labels)
else:
assert ax.get_legend() is None
def _check_data(self, xp, rs):
"""
Check each axes has identical lines
Parameters
----------
xp : matplotlib Axes object
rs : matplotlib Axes object
"""
xp_lines = xp.get_lines()
rs_lines = rs.get_lines()
def check_line(xpl, rsl):
xpdata = xpl.get_xydata()
rsdata = rsl.get_xydata()
tm.assert_almost_equal(xpdata, rsdata)
assert len(xp_lines) == len(rs_lines)
[check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]
tm.close()
def _check_visible(self, collections, visible=True):
"""
Check each artist is visible or not
Parameters
----------
collections : matplotlib Artist or its list-like
target Artist or its list or collection
visible : bool
expected visibility
"""
from matplotlib.collections import Collection
if not isinstance(collections,
Collection) and not is_list_like(collections):
collections = [collections]
for patch in collections:
assert patch.get_visible() == visible
def _get_colors_mapped(self, series, colors):
unique = series.unique()
# unique and colors length can be differed
# depending on slice value
mapped = dict(zip(unique, colors))
return [mapped[v] for v in series.values]
def _check_colors(self, collections, linecolors=None, facecolors=None,
mapping=None):
"""
Check each artist has expected line colors and face colors
Parameters
----------
collections : list-like
list or collection of target artist
linecolors : list-like which has the same length as collections
list of expected line colors
facecolors : list-like which has the same length as collections
list of expected face colors
mapping : Series
Series used for color grouping key
used for andrew_curves, parallel_coordinates, radviz test
"""
from matplotlib.lines import Line2D
from matplotlib.collections import (
Collection, PolyCollection, LineCollection
)
conv = self.colorconverter
if linecolors is not None:
if mapping is not None:
linecolors = self._get_colors_mapped(mapping, linecolors)
linecolors = linecolors[:len(collections)]
assert len(collections) == len(linecolors)
for patch, color in zip(collections, linecolors):
if isinstance(patch, Line2D):
result = patch.get_color()
# Line2D may contains string color expression
result = conv.to_rgba(result)
elif isinstance(patch, (PolyCollection, LineCollection)):
result = tuple(patch.get_edgecolor()[0])
else:
result = patch.get_edgecolor()
expected = conv.to_rgba(color)
assert result == expected
if facecolors is not None:
if mapping is not None:
facecolors = self._get_colors_mapped(mapping, facecolors)
facecolors = facecolors[:len(collections)]
assert len(collections) == len(facecolors)
for patch, color in zip(collections, facecolors):
if isinstance(patch, Collection):
# returned as list of np.array
result = patch.get_facecolor()[0]
else:
result = patch.get_facecolor()
if isinstance(result, np.ndarray):
result = tuple(result)
expected = conv.to_rgba(color)
assert result == expected
def _check_text_labels(self, texts, expected):
"""
Check each text has expected labels
Parameters
----------
texts : matplotlib Text object, or its list-like
target text, or its list
expected : str or list-like which has the same length as texts
expected text label, or its list
"""
if not is_list_like(texts):
assert texts.get_text() == expected
else:
labels = [t.get_text() for t in texts]
assert len(labels) == len(expected)
for l, e in zip(labels, expected):
assert l == e
def _check_ticks_props(self, axes, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None):
"""
Check each axes has expected tick properties
Parameters
----------
axes : matplotlib Axes object, or its list-like
xlabelsize : number
expected xticks font size
xrot : number
expected xticks rotation
ylabelsize : number
expected yticks font size
yrot : number
expected yticks rotation
"""
from matplotlib.ticker import NullFormatter
axes = self._flatten_visible(axes)
for ax in axes:
if xlabelsize or xrot:
if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):
# If minor ticks has NullFormatter, rot / fontsize are not
# retained
labels = ax.get_xticklabels()
else:
labels = ax.get_xticklabels() + ax.get_xticklabels(
minor=True)
for label in labels:
if xlabelsize is not None:
tm.assert_almost_equal(label.get_fontsize(),
xlabelsize)
if xrot is not None:
tm.assert_almost_equal(label.get_rotation(), xrot)
if ylabelsize or yrot:
if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):
labels = ax.get_yticklabels()
else:
labels = ax.get_yticklabels() + ax.get_yticklabels(
minor=True)
for label in labels:
if ylabelsize is not None:
tm.assert_almost_equal(label.get_fontsize(),
ylabelsize)
if yrot is not None:
tm.assert_almost_equal(label.get_rotation(), yrot)
def _check_ax_scales(self, axes, xaxis='linear', yaxis='linear'):
"""
Check each axes has expected scales
Parameters
----------
axes : matplotlib Axes object, or its list-like
xaxis : {'linear', 'log'}
expected xaxis scale
yaxis : {'linear', 'log'}
expected yaxis scale
"""
axes = self._flatten_visible(axes)
for ax in axes:
assert ax.xaxis.get_scale() == xaxis
assert ax.yaxis.get_scale() == yaxis
def _check_axes_shape(self, axes, axes_num=None, layout=None,
figsize=None):
"""
Check expected number of axes is drawn in expected layout
Parameters
----------
axes : matplotlib Axes object, or its list-like
axes_num : number
expected number of axes. Unnecessary axes should be set to
invisible.
layout : tuple
expected layout, (expected number of rows , columns)
figsize : tuple
expected figsize. default is matplotlib default
"""
if figsize is None:
figsize = self.default_figsize
visible_axes = self._flatten_visible(axes)
if axes_num is not None:
assert len(visible_axes) == axes_num
for ax in visible_axes:
# check something drawn on visible axes
assert len(ax.get_children()) > 0
if layout is not None:
result = self._get_axes_layout(_flatten(axes))
assert result == layout
tm.assert_numpy_array_equal(
visible_axes[0].figure.get_size_inches(),
np.array(figsize, dtype=np.float64))
def _get_axes_layout(self, axes):
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
def _flatten_visible(self, axes):
"""
Flatten axes, and filter only visible
Parameters
----------
axes : matplotlib Axes object, or its list-like
"""
axes = _flatten(axes)
axes = [ax for ax in axes if ax.get_visible()]
return axes
def _check_has_errorbars(self, axes, xerr=0, yerr=0):
"""
Check axes has expected number of errorbars
Parameters
----------
axes : matplotlib Axes object, or its list-like
xerr : number
expected number of x errorbar
yerr : number
expected number of y errorbar
"""
axes = self._flatten_visible(axes)
for ax in axes:
containers = ax.containers
xerr_count = 0
yerr_count = 0
for c in containers:
has_xerr = getattr(c, 'has_xerr', False)
has_yerr = getattr(c, 'has_yerr', False)
if has_xerr:
xerr_count += 1
if has_yerr:
yerr_count += 1
assert xerr == xerr_count
assert yerr == yerr_count
def _check_box_return_type(self, returned, return_type, expected_keys=None,
check_ax_title=True):
"""
Check box returned type is correct
Parameters
----------
returned : object to be tested, returned from boxplot
return_type : str
return_type passed to boxplot
expected_keys : list-like, optional
group labels in subplot case. If not passed,
the function checks assuming boxplot uses single ax
check_ax_title : bool
Whether to check the ax.title is the same as expected_key
Intended to be checked by calling from ``boxplot``.
Normal ``plot`` doesn't attach ``ax.title``, it must be disabled.
"""
from matplotlib.axes import Axes
types = {'dict': dict, 'axes': Axes, 'both': tuple}
if expected_keys is None:
# should be fixed when the returning default is changed
if return_type is None:
return_type = 'dict'
assert isinstance(returned, types[return_type])
if return_type == 'both':
assert isinstance(returned.ax, Axes)
assert isinstance(returned.lines, dict)
else:
# should be fixed when the returning default is changed
if return_type is None:
for r in self._flatten_visible(returned):
assert isinstance(r, Axes)
return
assert isinstance(returned, Series)
assert sorted(returned.keys()) == sorted(expected_keys)
for key, value in iteritems(returned):
assert isinstance(value, types[return_type])
# check returned dict has correct mapping
if return_type == 'axes':
if check_ax_title:
assert value.get_title() == key
elif return_type == 'both':
if check_ax_title:
assert value.ax.get_title() == key
assert isinstance(value.ax, Axes)
assert isinstance(value.lines, dict)
elif return_type == 'dict':
line = value['medians'][0]
axes = line.axes if self.mpl_ge_1_5_0 else line.get_axes()
if check_ax_title:
assert axes.get_title() == key
else:
raise AssertionError
def _check_grid_settings(self, obj, kinds, kws={}):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
import matplotlib as mpl
def is_grid_on():
xoff = all(not g.gridOn
for g in self.plt.gca().xaxis.get_major_ticks())
yoff = all(not g.gridOn
for g in self.plt.gca().yaxis.get_major_ticks())
return not (xoff and yoff)
spndx = 1
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc('axes', grid=False)
obj.plot(kind=kind, **kws)
assert not is_grid_on()
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc('axes', grid=True)
obj.plot(kind=kind, grid=False, **kws)
assert not is_grid_on()
if kind != 'pie':
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc('axes', grid=True)
obj.plot(kind=kind, **kws)
assert is_grid_on()
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc('axes', grid=False)
obj.plot(kind=kind, grid=True, **kws)
assert is_grid_on()
def _maybe_unpack_cycler(self, rcParams, field='color'):
"""
Compat layer for MPL 1.5 change to color cycle
Before: plt.rcParams['axes.color_cycle'] -> ['b', 'g', 'r'...]
After : plt.rcParams['axes.prop_cycle'] -> cycler(...)
"""
if self.mpl_ge_1_5_0:
cyl = rcParams['axes.prop_cycle']
colors = [v[field] for v in cyl]
else:
colors = rcParams['axes.color_cycle']
return colors
def _check_plot_works(f, filterwarnings='always', **kwargs):
import matplotlib.pyplot as plt
ret = None
with warnings.catch_warnings():
warnings.simplefilter(filterwarnings)
try:
try:
fig = kwargs['figure']
except KeyError:
fig = plt.gcf()
plt.clf()
ax = kwargs.get('ax', fig.add_subplot(211)) # noqa
ret = f(**kwargs)
assert_is_valid_plot_return_object(ret)
try:
kwargs['ax'] = fig.add_subplot(212)
ret = f(**kwargs)
except Exception:
pass
else:
assert_is_valid_plot_return_object(ret)
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
finally:
tm.close(fig)
return ret
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
| bsd-3-clause |
ujvl/ray-ng | python/ray/tune/analysis/experiment_analysis.py | 1 | 7120 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
try:
import pandas as pd
except ImportError:
pd = None
from ray.tune.error import TuneError
from ray.tune.result import EXPR_PROGRESS_FILE, EXPR_PARAM_FILE, CONFIG_PREFIX
logger = logging.getLogger(__name__)
class Analysis(object):
"""Analyze all results from a directory of experiments."""
def __init__(self, experiment_dir):
experiment_dir = os.path.expanduser(experiment_dir)
if not os.path.isdir(experiment_dir):
raise ValueError(
"{} is not a valid directory.".format(experiment_dir))
self._experiment_dir = experiment_dir
self._configs = {}
self._trial_dataframes = {}
if not pd:
logger.warning(
"pandas not installed. Run `pip install pandas` for "
"Analysis utilities.")
else:
self.fetch_trial_dataframes()
def dataframe(self, metric=None, mode=None):
"""Returns a pandas.DataFrame object constructed from the trials.
Args:
metric (str): Key for trial info to order on.
If None, uses last result.
mode (str): One of [min, max].
"""
rows = self._retrieve_rows(metric=metric, mode=mode)
all_configs = self.get_all_configs(prefix=True)
for path, config in all_configs.items():
if path in rows:
rows[path].update(config)
rows[path].update(logdir=path)
return pd.DataFrame(list(rows.values()))
def get_best_config(self, metric, mode="max"):
"""Retrieve the best config corresponding to the trial.
Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
"""
rows = self._retrieve_rows(metric=metric, mode=mode)
all_configs = self.get_all_configs()
compare_op = max if mode == "max" else min
best_path = compare_op(rows, key=lambda k: rows[k][metric])
return all_configs[best_path]
def get_best_logdir(self, metric, mode="max"):
"""Retrieve the logdir corresponding to the best trial.
Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
"""
df = self.dataframe(metric=metric, mode=mode)
if mode == "max":
return df.iloc[df[metric].idxmax()].logdir
elif mode == "min":
return df.iloc[df[metric].idxmin()].logdir
def fetch_trial_dataframes(self):
fail_count = 0
for path in self._get_trial_paths():
try:
self.trial_dataframes[path] = pd.read_csv(
os.path.join(path, EXPR_PROGRESS_FILE))
except Exception:
fail_count += 1
if fail_count:
logger.debug(
"Couldn't read results from {} paths".format(fail_count))
return self.trial_dataframes
def get_all_configs(self, prefix=False):
"""Returns a list of all configurations.
Parameters:
prefix (bool): If True, flattens the config dict
and prepends `config/`.
"""
fail_count = 0
for path in self._get_trial_paths():
try:
with open(os.path.join(path, EXPR_PARAM_FILE)) as f:
config = json.load(f)
if prefix:
for k in list(config):
config[CONFIG_PREFIX + k] = config.pop(k)
self._configs[path] = config
except Exception:
fail_count += 1
if fail_count:
logger.warning(
"Couldn't read config from {} paths".format(fail_count))
return self._configs
def _retrieve_rows(self, metric=None, mode=None):
assert mode is None or mode in ["max", "min"]
rows = {}
for path, df in self.trial_dataframes.items():
if mode == "max":
idx = df[metric].idxmax()
elif mode == "min":
idx = df[metric].idxmin()
else:
idx = -1
rows[path] = df.iloc[idx].to_dict()
return rows
def _get_trial_paths(self):
_trial_paths = []
for trial_path, _, files in os.walk(self._experiment_dir):
if EXPR_PROGRESS_FILE in files:
_trial_paths += [trial_path]
if not _trial_paths:
raise TuneError("No trials found in {}.".format(
self._experiment_dir))
return _trial_paths
@property
def trial_dataframes(self):
"""List of all dataframes of the trials."""
return self._trial_dataframes
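# Editorial usage sketch (hypothetical directory and metric names, not part of
# the original module):
#
#     analysis = Analysis("~/ray_results/my_experiment")
#     df = analysis.dataframe(metric="episode_reward_mean", mode="max")
#     best_config = analysis.get_best_config(metric="episode_reward_mean")
#     best_logdir = analysis.get_best_logdir(metric="episode_reward_mean")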
class ExperimentAnalysis(Analysis):
"""Analyze results from a Tune experiment.
Parameters:
experiment_checkpoint_path (str): Path to a json file
representing an experiment state. Corresponds to
Experiment.local_dir/Experiment.name/experiment_state.json
Example:
>>> tune.run(my_trainable, name="my_exp", local_dir="~/tune_results")
>>> analysis = ExperimentAnalysis(
>>> experiment_checkpoint_path="~/tune_results/my_exp/state.json")
"""
def __init__(self, experiment_checkpoint_path, trials=None):
"""Initializer.
Args:
experiment_path (str): Path to where experiment is located.
trials (list|None): List of trials that can be accessed via
`analysis.trials`.
"""
with open(experiment_checkpoint_path) as f:
_experiment_state = json.load(f)
self._experiment_state = _experiment_state
if "checkpoints" not in _experiment_state:
raise TuneError("Experiment state invalid; no checkpoints found.")
self._checkpoints = _experiment_state["checkpoints"]
self.trials = trials
super(ExperimentAnalysis, self).__init__(
os.path.dirname(experiment_checkpoint_path))
def stats(self):
"""Returns a dictionary of the statistics of the experiment."""
return self._experiment_state.get("stats")
def runner_data(self):
"""Returns a dictionary of the TrialRunner data."""
return self._experiment_state.get("runner_data")
def _get_trial_paths(self):
"""Overwrites Analysis to only have trials of one experiment."""
if self.trials:
_trial_paths = [t.logdir for t in self.trials]
else:
logger.warning("No `self.trials`. Drawing logdirs from checkpoint "
"file. This may result in some information that is "
"out of sync, as checkpointing is periodic.")
_trial_paths = [
checkpoint["logdir"] for checkpoint in self._checkpoints
]
if not _trial_paths:
raise TuneError("No trials found.")
return _trial_paths
| apache-2.0 |
epascale/pyCIRSF | photom_v4.py | 1 | 4370 | import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import astropy.wcs as wcs
from photutils.background import Background2D
from photutils import CircularAperture
from astropy.convolution import convolve, Gaussian2DKernel
from scipy.signal import medfilt2d, medfilt
import pyCIRSF as irsf
import os, glob, sys
date = '160127'
band = 'h'
flat_suffix = 'flat_160128_v2'
flat_suffix = 'cflat'
dark_suffix = 'dark_master'
object_name = 'WASP-121'
Nstack = 127
r0 = 15
astrom_data_path = os.path.join('~/IRSF/proc_data_ep', date)
raw_data_path = os.path.join('~/IRSF/data', date, 'rawdata')
cal_path = '~/IRSF/calibration'
cat_fname = '~/IRSF/etc/ref_cat_2mass.dat'
wb_fname = '~/IRSF/etc/IRSF_newflags_v2.xlsx'
cat_2mass, cat_ra, cat_dec = irsf.lib.get_reference_cat(
fname=os.path.expanduser(cat_fname),
Jmag_lim=10.6)
dark = irsf.lib.get_dark(os.path.expanduser(os.path.join(
cal_path, band+dark_suffix+'.fits.fz')), flag_mask=0x1 | 0x4)
flat = irsf.lib.get_flat(os.path.expanduser(os.path.join(
cal_path, band+flat_suffix+'.fits')), flag_mask=0x1 | 0x4)
fr_tab = irsf.lib.get_frames_flags(os.path.expanduser(wb_fname),
date, object_name)
#plt.ion();
plt.figure(123); plt.clf()
fig, ( (ax0, ax1, ax2), (ax3, ax4, ax5) ) = plt.subplots(nrows=2,
ncols=3,
num=123)
for fr in fr_tab['Frame'][:1]:
wcs_fn = os.path.expanduser(os.path.join(astrom_data_path,
'{:s}{:s}_{:04d}.fits'.format(band, date, fr)))
raw_fn = os.path.expanduser(os.path.join(raw_data_path,
'{:s}{:s}_{:04d}.fits.fz'.format(band, date, fr)))
hdulist = fits.open(wcs_fn)
hdr = hdulist[0].header
hdulist.close()
hdulist = fits.open(raw_fn)
ima =irsf.lib.apply_dark_flat(hdulist[1].data, dark=dark, flat=flat)
hdulist.close()
w = wcs.WCS(hdr)
print 'filtering ...'
cat_2mass_, ra_, dec_ = irsf.lib.get_reference_cat(
fname=os.path.expanduser(cat_fname),
Jmag_lim=18)
cat_x_, cat_y_ = w.all_world2pix(ra_.degree, dec_, 0)
ima, background = irsf.medianfilter.remove_background(ima, source_x = cat_x_, source_y = cat_y_, source_r = r0)
print 'done.'
# Create the stack
cat_x, cat_y = w.all_world2pix(cat_ra.degree, cat_dec, 0)
stack, lbx, lby = irsf.lib.stacking(ima, cat_x, cat_y, N=Nstack,
remove_background=False)
pos = (lbx, lby)
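    # Editorial comment (not in the original source): the loop below builds a
    # curve of growth, i.e. aperture photometry of the stacked image at the
    # catalogue position for radii from 1 to 50 pixels, with what appears to
    # be a 50-60 pixel annulus used for the median background estimate.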
radii = np.linspace(1, 50, 40)
flux = []
for r in radii:
flux_, ap, map_area = irsf.lib.photom(stack, pos, r, r_in=50, r_out=60, mode='median')
flux.append(flux_)
vmin, vmax = np.percentile(ima.filled().flatten(), (5, 99.9))
im0 = ax0.imshow(ima, interpolation='none', cmap='gist_heat', vmin=vmin, vmax=vmax)
ax0.contour(background, colors='w', alpha=0.2)
ax0.format_coord = irsf.lib.Formatter(im0)
ax0.autoscale(False)
ax0.plot(cat_x, cat_y, '+r')
apertures = CircularAperture( (cat_x, cat_y), r = r0)
apertures.plot(ax = ax0, color='r')
im1 = ax1.imshow(np.log10(stack-stack.min()+1e-6), interpolation='none', cmap='gist_heat')
ax1.format_coord = irsf.lib.Formatter(im1)
ax2.plot(np.ma.median(ima, axis=0), 'r', label='gradient across x')
ax2.plot(np.ma.median(ima, axis=1), 'b', label='gradient across y')
#ax2.plot(np.ma.median(background, axis=0), 'm')
#ax2.plot(np.ma.median(background, axis=1), 'c')
ax2.legend()
ax3.plot(radii, flux, 'o-k')
ax3.grid()
ax3t = ax3.twinx()
ax3t.plot(stack.sum(axis=0)[stack.shape[1]//2:], 'r', label='$\int\, PSF\, dy$')
ax3t.plot(stack.sum(axis=1)[stack.shape[0]//2:], 'b', label='$\int\, PSF\, dx$')
ax4.plot(stack.sum(axis=0), 'r', label='$\int\, PSF\, dy$')
ax4.plot(stack.sum(axis=1), 'b', label='$\int\, PSF\, dx$')
ymin, ymax = ax4.get_ylim()
ax4.vlines(lbx, ymin, ymax, colors='r')
ax4.vlines(lby, ymin, ymax, colors='b')
ax4t = ax4.twinx()
ax4t.plot(stack.sum(axis=0).cumsum(), 'r')
ax4t.plot(stack.sum(axis=1).cumsum(), 'b')
ax4.grid()
ax4.legend()
vmin, vmax = np.percentile(background.flatten(), (5, 99.9))
im5 = ax5.imshow(background.filled(), interpolation='none', cmap='gist_heat', vmin=1000, vmax=1100)
ax5.format_coord = irsf.lib.Formatter(im5)
ax5.grid()
plt.show()
| gpl-3.0 |
hasecbinusr/pysal | pysal/spreg/opt.py | 8 | 2370 | import copy
def simport(modname):
"""
Safely import a module without raising an error.
Parameters
-----------
modname : str
module name needed to import
Returns
--------
tuple of (True, Module) or (False, None) depending on whether the import
succeeded.
Notes
------
Wrapping this function around an iterative context or a with context would
allow the module to be used without necessarily attaching it permanently in
the global namespace:
>>> for t,mod in simport('pandas'):
if t:
mod.DataFrame()
else:
#do alternative behavior here
del mod #or don't del, your call
instead of:
>>> t, mod = simport('pandas')
>>> if t:
mod.DataFrame()
else:
#do alternative behavior here
    The first idiom makes it work kind of like a with statement.
"""
try:
exec('import {}'.format(modname))
return True, eval(modname)
except:
return False, None
def requires(*args, **kwargs):
"""
Decorator to wrap functions with extra dependencies:
Arguments
---------
args : list
list of strings containing module to import
verbose : bool
boolean describing whether to print a warning message on import
failure
Returns
-------
Original function is all arg in args are importable, otherwise returns a
function that passes.
"""
v = kwargs.pop('verbose', True)
wanted = copy.deepcopy(args)
def inner(function):
available = [simport(arg)[0] for arg in args]
if all(available):
return function
else:
def passer(*args,**kwargs):
if v:
missing = [arg for i, arg in enumerate(wanted) if not available[i]]
print('missing dependencies: {d}'.format(d=missing))
print('not running {}'.format(function.__name__))
else:
pass
return passer
return inner
if __name__ == '__main__':
@requires('pandas')
def test():
import pandas
print('ASDF')
@requires('thisisnotarealmodule')
def test2():
print('you shouldnt see this')
test()
test2()
| bsd-3-clause |