# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..extern import six
from ..extern.six.moves import zip, range
from .index import TableIndices, TableLoc, TableILoc
import re
import sys
from collections import OrderedDict, Mapping
import warnings
from copy import deepcopy
import numpy as np
from numpy import ma
from .. import log
from ..io import registry as io_registry
from ..units import Quantity
from ..utils import isiterable, ShapedLikeNDArray
from ..utils.compat.numpy import broadcast_to as np_broadcast_to
from ..utils.console import color_print
from ..utils.metadata import MetaData
from ..utils.data_info import BaseColumnInfo, MixinInfo, ParentDtypeInfo, DataInfo
from . import groups
from .pprint import TableFormatter
from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,
col_copy)
from .row import Row
from .np_utils import fix_column_name, recarray_fromrecords
from .info import TableInfo
from .index import Index, _IndexModeContext, get_index
from . import conf
__doctest_skip__ = ['Table.read', 'Table.write',
'Table.convert_bytestring_to_unicode',
'Table.convert_unicode_to_bytestring',
]
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, 'shape') else ()
return (col.info.name, col_dtype, col_shape)
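# Editor's note (illustrative sketch, not part of the original source): ``descr``
# builds the (name, dtype, shape) entry used in a structured-array dtype.  For a
# plain length-3 integer Column one would expect, roughly (on a 64-bit platform):
# >>> descr(Column(name='a', data=[1, 2, 3]))   # doctest: +SKIP
# ('a', dtype('int64'), ())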
def has_info_class(obj, cls):
return hasattr(obj, 'info') and isinstance(obj.info, cls)
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects, or any data structure that can initialize a dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super(TableColumns, self).__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, six.string_types):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return self.values()[item]
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.values()[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError('Illegal key or index value for {} object'
.format(self.__class__.__name__))
def __setitem__(self, item, value):
if item in self:
raise ValueError("Cannot replace column '{0}'. Use Table.replace_column() instead."
.format(item))
super(TableColumns, self).__setitem__(item, value)
def __repr__(self):
names = ("'{0}'".format(x) for x in six.iterkeys(self))
return "<{1} names=({0})>".format(",".join(names), self.__class__.__name__)
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError("Column {0} already exists".format(new_name))
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(six.itervalues(self))
self.clear()
self.update(list(zip(new_names, cols)))
# Define keys and values for Python 2 and 3 source compatibility
def keys(self):
return list(OrderedDict.keys(self))
def values(self):
return list(OrderedDict.values(self))
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are instances of given classes.
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
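# Editor's sketch of TableColumns access patterns (illustrative, not part of the
# original source; mirrors the __getitem__ docstring above):
# >>> tc = TableColumns([Column(name='a'), Column(name='b')])   # doctest: +SKIP
# >>> tc['a']                  # access by name                  # doctest: +SKIP
# >>> tc[0]                    # access by position              # doctest: +SKIP
# >>> tc['a', 'b']             # sub-TableColumns                # doctest: +SKIP
# >>> tc.isinstance(Column)    # filter columns by class         # doctest: +SKIP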
class Table(object):
"""A class to represent tables of heterogeneous data.
`Table` provides a class for heterogeneous tabular data, making use of a
`numpy` structured array internally to store the data values. A key
enhancement provided by the `Table` class is the ability to easily modify
the structure of the table by adding or removing columns, or adding new
rows of data. In addition, table and column metadata are fully supported.
`Table` differs from `~astropy.nddata.NDData` by the assumption that the
input data consists of columns of homogeneous data, where each column
has a unique identifier and may contain additional metadata such as the
data unit, format, and description.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
.. note::
If the input is a Table the ``meta`` is always copied regardless of the
``copy`` parameter.
"""
meta = MetaData()
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
def as_array(self, keep_byteorder=False):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
Returns
-------
table_array : np.ndarray (unmasked) or np.ma.MaskedArray (masked)
Copy of table as a numpy structured array
"""
if len(self.columns) == 0:
return None
sys_byteorder = ('>', '<')[sys.byteorder == 'little']
native_order = ('=', sys_byteorder)
dtype = []
cols = self.columns.values()
for col in cols:
col_descr = descr(col)
byteorder = col.info.dtype.byteorder
if not keep_byteorder and byteorder not in native_order:
new_dt = np.dtype(col_descr[1]).newbyteorder('=')
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
empty_init = ma.empty if self.masked else np.empty
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
return data
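# Illustrative usage (editor's sketch, not part of the original source):
# >>> t = Table({'a': [1, 2], 'b': [3.0, 4.0]})   # doctest: +SKIP
# >>> arr = t.as_array()                          # doctest: +SKIP
# >>> arr.dtype.names                             # doctest: +SKIP
# ('a', 'b')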
def __init__(self, data=None, masked=None, names=None, dtype=None,
meta=None, copy=True, rows=None, copy_indices=True,
**kwargs):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.meta = meta
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError('Cannot specify dtype when copy=False')
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
is_list_of_dict = False
if rows is not None:
if data is not None:
raise ValueError('Cannot supply both `data` and `rows` values')
if all(isinstance(row, dict) for row in rows):
is_list_of_dict = True # Avoid doing the all(...) test twice.
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
rec_data = recarray_fromrecords(rows)
data = [rec_data[name] for name in rec_data.dtype.names]
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
if hasattr(data, '__astropy_table__'):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied (though the meta
# will be deep-copied anyway).
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError('__init__() got unexpected keyword argument {!r}'
.format(list(kwargs.keys())[0]))
if (isinstance(data, np.ndarray) and
data.shape == (0,) and
not data.dtype.names):
data = None
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
if isinstance(data, (list, tuple)):
init_func = self._init_from_list
if data and (is_list_of_dict or all(isinstance(row, dict) for row in data)):
n_cols = len(data[0])
else:
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError('Can not initialize a Table with a scalar')
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
init_func = self._init_from_table
n_cols = len(data.colnames)
default_names = data.colnames
# don't copy indices if the input Table is in non-copy mode
self._init_indices = self._init_indices and data._copy_indices
elif data is None:
if names is None:
if dtype is None:
return # Empty table
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError('dtype was specified but could not be '
'parsed for column names')
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError('Data type {0} not allowed to init Table'
.format(type(data)))
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if names is None:
names = default_names or [None] * n_cols
if dtype is None:
dtype = [None] * n_cols
# Numpy does not support Unicode column names on Python 2, or
# bytes column names on Python 3, so fix them up now.
names = [fix_column_name(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Whatever happens above, the masked property should be set to a boolean
if type(self.masked) is not bool:
raise TypeError("masked property has not been set to True or False")
def __getstate__(self):
columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items())
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked:
mask_table = Table([col.mask for col in self.columns.values()],
names=self.colnames, copy=False)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
fill_value : scalar; optional
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : Table
New table with masked values filled
"""
if self.masked:
data = [col.filled(fill_value) for col in six.itervalues(self.columns)]
else:
data = self
return self.__class__(data, meta=deepcopy(self.meta))
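# Illustrative usage (editor's sketch, not part of the original source):
# >>> t = Table({'a': [1, 2, 3]}, masked=True)    # doctest: +SKIP
# >>> t['a'].mask = [False, True, False]          # doctest: +SKIP
# >>> print(t.filled(-99)['a'])                   # doctest: +SKIP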
@property
def indices(self):
'''
Return the indices associated with columns of the table
as a TableIndices object.
'''
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum([index is x for x in lst]) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
'''
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
'''
return TableLoc(self)
@property
def iloc(self):
'''
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
'''
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
'''
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, from among SortedArray, BST,
FastBST, and FastRBT. If None (the default), SortedArray is used.
unique : bool
Whether the values of the index must be unique. Default is False.
'''
if isinstance(colnames, six.string_types):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, '_supports_indexing', False):
raise ValueError('Cannot create an index on column "{0}", of '
'type "{1}"'.format(col.info.name, type(col)))
index = Index(columns, engine=engine, unique=unique)
if not self.indices:
self.primary_key = colnames
for col in columns:
col.info.indices.append(index)
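# Illustrative usage of table indices (editor's sketch, not part of the
# original source):
# >>> t = Table({'key': ['b', 'a', 'c'], 'val': [2, 1, 3]})   # doctest: +SKIP
# >>> t.add_index('key')                                      # doctest: +SKIP
# >>> t.loc['a']['val']        # look up a row by index value # doctest: +SKIP
# 1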
def remove_indices(self, colname):
'''
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
'''
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
'''
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
return _IndexModeContext(self, mode)
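# Illustrative usage (editor's sketch, not part of the original source):
# 'freeze' defers index maintenance until the context exits, which helps when
# making many modifications to an indexed column:
# >>> with t.index_mode('freeze'):            # doctest: +SKIP
# ...     t['key'][:] = ['x', 'y', 'z']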
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
return self.as_array().data if self.masked else self.as_array()
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
if not isiterable(inp_list):
raise ValueError('{0} must be a list or None'.format(inp_str))
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
'Arguments "names" and "dtype" must match number of columns')
def _set_masked_from_cols(self, cols):
if self.masked is None:
if any(isinstance(col, (MaskedColumn, ma.MaskedArray)) for col in cols):
self._set_masked(True)
else:
self._set_masked(False)
elif not self.masked:
if any(np.any(col.mask) for col in cols if isinstance(col, (MaskedColumn, ma.MaskedArray))):
self._set_masked(True)
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
names_from_data = set()
for row in data:
names_from_data.update(row)
cols = {}
for name in names_from_data:
cols[name] = []
for i, row in enumerate(data):
try:
cols[name].append(row[name])
except KeyError:
raise ValueError('Row {0} has no value for column {1}'.format(i, name))
if all(name is None for name in names):
names = sorted(names_from_data)
self._init_from_dict(cols, names, dtype, n_cols, copy)
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of columns. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
if data and all(isinstance(row, dict) for row in data):
self._init_from_list_of_dicts(data, names, dtype, n_cols, copy)
return
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(data)
cols = []
def_names = _auto_names(n_cols)
for col, name, def_name, dtype in zip(data, names, def_names, dtype):
# Structured ndarray gets viewed as a mixin
if isinstance(col, np.ndarray) and len(col.dtype) > 1:
col = col.view(NdarrayMixin)
if isinstance(col, (Column, MaskedColumn)):
col = self.ColumnClass(name=(name or col.info.name or def_name),
data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
elif self._add_as_mixin_column(col):
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute.
if copy:
col = col_copy(col, copy_indices=self._init_indices)
col.info.name = name or col.info.name or def_name
elif isinstance(col, np.ndarray) or isiterable(col):
col = self.ColumnClass(name=(name or def_name), data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
else:
raise ValueError('Elements in list initialization must be '
'either Column or list-like')
cols.append(col)
self._init_from_cols(cols)
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = ([data[name] for name in data_names] if struct else
[data[:, i] for i in range(n_cols)])
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(cols)
if copy:
self._init_from_list(cols, names, dtype, n_cols, copy)
else:
dtype = [(name, col.dtype, col.shape[1:]) for name, col in zip(names, cols)]
newdata = data.view(dtype).ravel()
columns = self.TableColumns()
for name in names:
columns[name] = self.ColumnClass(name=name, data=newdata[name])
columns[name].info.parent_table = self
self.columns = columns
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
# TODO: is this restriction still needed with no ndarray?
if not copy:
raise ValueError('Cannot use copy=False with a dict data input')
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _init_from_table(self, data, names, dtype, n_cols, copy):
"""Initialize table from an existing Table object """
table = data # data is really a Table, rename for clarity
self.meta.clear()
self.meta.update(deepcopy(table.meta))
self.primary_key = table.primary_key
cols = list(table.columns.values())
self._init_from_list(cols, names, dtype, n_cols, copy)
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if col.__class__ is not self.ColumnClass and isinstance(col, Column):
col = self.ColumnClass(col) # copy attributes and reference data
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = set(len(col) for col in cols)
if len(lengths) != 1:
raise ValueError('Inconsistent data column lengths: {0}'
.format(lengths))
# Set the table masking
self._set_masked_from_cols(cols)
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
table.meta.clear()
table.meta.update(deepcopy(self.meta))
table.primary_key = self.primary_key
cols = self.columns.values()
newcols = []
for col in cols:
col.info._copy_indices = self._copy_indices
newcol = col[slice_]
if col.info.indices:
newcol = col.info.slice_indices(newcol, slice_, len(col))
newcols.append(newcol)
col.info._copy_indices = True
self._make_table_from_cols(table, newcols)
return table
@staticmethod
def _make_table_from_cols(table, cols):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
colnames = set(col.info.name for col in cols)
if None in colnames:
raise TypeError('Cannot have None for column name')
if len(colnames) != len(cols):
raise ValueError('Duplicate column names')
columns = table.TableColumns((col.info.name, col) for col in cols)
for col in cols:
col.info.parent_table = table
if table.masked and not hasattr(col, 'mask'):
col.mask = FalseArray(col.shape)
table.columns = columns
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(self, html=False, descr_vals=None, max_width=None,
tableid=None, show_dtype=True, max_lines=None,
tableclass=None):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append('masked=True')
descr_vals.append('length={0}'.format(len(self)))
descr = '<' + ' '.join(descr_vals) + '>\n'
if html:
from ..utils.xml.writer import xml_escape
descr = xml_escape(descr)
if tableid is None:
tableid = 'table{id}'.format(id=id(self))
data_lines, outs = self.formatter._pformat_table(
self, tableid=tableid, html=html, max_width=max_width,
show_name=True, show_unit=None, show_dtype=show_dtype,
max_lines=max_lines, tableclass=tableclass)
out = descr + '\n'.join(data_lines)
if six.PY2 and isinstance(out, six.text_type):
out = out.encode('utf-8')
return out
def _repr_html_(self):
return self._base_repr_(html=True, max_width=-1,
tableclass=conf.default_notebook_table_class)
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __unicode__(self):
return '\n'.join(self.pformat())
if not six.PY2:
__str__ = __unicode__
def __bytes__(self):
return six.text_type(self).encode('utf-8')
if six.PY2:
__str__ = __bytes__
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
# Is it a mixin but not a Quantity (which gets converted to Column with
# unit set).
return has_info_class(col, MixinInfo) and not isinstance(col, Quantity)
def pprint(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int
Maximum number of lines in table output.
max_width : int or `None`
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
lines, outs = self.formatter._pformat_table(self, max_lines, max_width,
show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + self.columns.values(),
copy=False)
else:
return self
def show_in_notebook(self, tableid=None, css=None, display_length=50,
table_class='astropy-default', show_row_index='idx'):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or `None`
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <http://getbootstrap.com/css/#tables>`_
for the list of classes.
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from .jsviewer import JSViewer
from IPython.display import HTML
if tableid is None:
tableid = 'table{0}-{1}'.format(id(self),
np.random.randint(1, 1e6))
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == 'astropy-default':
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,
max_lines=-1, show_dtype=False,
tableclass=table_class)
columns = display_table.columns.values()
sortable_columns = [i for i, col in enumerate(columns)
if col.dtype.kind in 'iufc']
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
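# Illustrative usage in a Jupyter notebook cell (editor's sketch, not part of
# the original source):
# >>> t.show_in_notebook(display_length=10, show_row_index='idx')  # doctest: +SKIP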
def show_in_browser(self, max_lines=5000, jsviewer=False,
browser='default', jskwargs={'use_local_files': True},
tableid=None, table_class="display compact",
css=None, show_row_index='idx'):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or `None`
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in http://www.datatables.net/manual/styling/classes
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import webbrowser
import tempfile
from ..extern.six.moves.urllib.parse import urljoin
from ..extern.six.moves.urllib.request import pathname2url
from .jsviewer import DEFAULT_CSS
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'table.html')
with open(path, 'w') as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(tmp, format='jsviewer', css=css,
max_lines=max_lines, jskwargs=jskwargs,
table_id=tableid, table_class=table_class)
else:
self.write(tmp, format='html')
try:
br = webbrowser.get(None if browser == 'default' else browser)
except webbrowser.Error:
log.error("Browser '{}' not found.".format(browser))
else:
br.open(urljoin('file:', pathname2url(path)))
def pformat(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int or `None`
Maximum number of rows to output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or `None`
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or `None`
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
lines, outs = self.formatter._pformat_table(
self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype, html=html,
tableid=tableid, tableclass=tableclass, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
return lines
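# Illustrative usage (editor's sketch, not part of the original source):
# pformat returns the same lines that pprint would print, which is convenient
# for logging or embedding in other text:
# >>> lines = t.pformat(max_lines=8, show_unit=True)   # doctest: +SKIP
# >>> print('\n'.join(lines))                          # doctest: +SKIP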
def more(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype)
def __getitem__(self, item):
if isinstance(item, six.string_types):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.Row(self, item.item())
elif (isinstance(item, (tuple, list)) and item and
all(isinstance(x, six.string_types) for x in item)):
bad_names = [x for x in item if x not in self.colnames]
if bad_names:
raise ValueError('Slice name(s) {0} not valid column name(s)'
.format(', '.join(bad_names)))
out = self.__class__([self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
return out
elif ((isinstance(item, np.ndarray) and item.size == 0) or
(isinstance(item, (tuple, list)) and not item)):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
isinstance(item, tuple) and all(isinstance(x, np.ndarray)
for x in item)):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
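# Editor's summary of the item-access forms handled above (illustrative, not
# part of the original source):
# >>> t['a']                    # Column 'a'                       # doctest: +SKIP
# >>> t[0]                      # first Row                        # doctest: +SKIP
# >>> t['a', 'b']               # new Table with columns 'a', 'b'  # doctest: +SKIP
# >>> t[1:3]                    # new Table with a slice of rows   # doctest: +SKIP
# >>> t[np.where(t['a'] > 2)]   # rows selected via np.where       # doctest: +SKIP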
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, six.string_types) and item not in self.colnames:
NewColumn = self.MaskedColumn if self.masked else self.Column
# If value doesn't have a dtype and won't be added as a mixin then
# convert to a numpy array.
if not hasattr(value, 'dtype') and not self._add_as_mixin_column(value):
value = np.asarray(value)
# Structured ndarray gets viewed as a mixin
if isinstance(value, np.ndarray) and len(value.dtype) > 1:
value = value.view(NdarrayMixin)
# Make new column and assign the value. If the table currently
# has no rows (len=0) or the value is already a Column then
# define new column directly from value. In the latter case
# this allows for propagation of Column metadata. Otherwise
# define a new column with the right length and shape and then
# set it from value. This allows for broadcasting, e.g. t['a']
# = 1.
name = item
# If this is a column-like object that could be added directly to table
if isinstance(value, BaseColumn) or self._add_as_mixin_column(value):
# If we're setting a new column to a scalar, broadcast it.
# (things will fail in _init_from_cols if this doesn't work)
if (len(self) > 0 and (getattr(value, 'isscalar', False) or
getattr(value, 'shape', None) == () or
len(value) == 1)):
new_shape = (len(self),) + getattr(value, 'shape', ())[1:]
if isinstance(value, np.ndarray):
value = np_broadcast_to(value, shape=new_shape,
subok=True)
elif isinstance(value, ShapedLikeNDArray):
value = value._apply(np_broadcast_to, shape=new_shape,
subok=True)
new_column = col_copy(value)
new_column.info.name = name
elif len(self) == 0:
new_column = NewColumn(value, name=name)
else:
new_column = NewColumn(name=name, length=len(self), dtype=value.dtype,
shape=value.shape[1:],
unit=getattr(value, 'unit', None))
new_column[:] = value
# Now add new column to the table
self.add_columns([new_column], copy=False)
else:
n_cols = len(self.columns)
if isinstance(item, six.string_types):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (not getattr(self, '_setitem_inplace', False)
and not conf.replace_inplace):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
# Set the corresponding row assuming value is an iterable.
if not hasattr(value, '__len__'):
raise TypeError('Right side value must be iterable')
if len(value) != n_cols:
raise ValueError('Right side value needs {0} elements (one for each column)'
.format(n_cols))
for col, val in zip(self.columns.values(), value):
col[item] = val
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
(isinstance(item, tuple) and # output from np.where
all(isinstance(x, np.ndarray) for x in item))):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
import itertools
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError('Right side value needs {0} elements (one for each column)'
.format(n_cols))
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
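# Editor's summary of the assignment forms handled above (illustrative, not
# part of the original source; assumes numeric columns):
# >>> t['c'] = [7, 8, 9]          # add a new column or replace 'c'   # doctest: +SKIP
# >>> t[0] = (1, 0.5, 7)          # set an entire row                 # doctest: +SKIP
# >>> t[t['a'] > 2] = 0           # broadcast a scalar into rows      # doctest: +SKIP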
def __delitem__(self, item):
if isinstance(item, six.string_types):
self.remove_column(item)
elif isinstance(item, tuple):
self.remove_columns(item)
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'
' to convert to a masked table)')
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if hasattr(self, '_masked'):
# The only allowed change is from None to False or True, or False to True
if self._masked is None and masked in [False, True]:
self._masked = masked
elif self._masked is False and masked is True:
log.info("Upgrading Table to masked Table. Use Table.filled() to convert to unmasked table.")
self._masked = masked
elif self._masked is masked:
raise Exception("Masked attribute is already set to {0}".format(masked))
else:
raise Exception("Cannot change masked attribute to {0} once it is set to {1}"
.format(masked, self._masked))
else:
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
if self._masked:
self._column_class = self.MaskedColumn
else:
self._column_class = self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
def keys(self):
return list(self.columns.keys())
def __len__(self):
if len(self.columns) == 0:
return 0
lengths = set(len(col) for col in self.columns.values())
if len(lengths) != 1:
len_strs = [' {0} : {1}'.format(name, len(col)) for name, col in self.columns.items()]
raise ValueError('Column length mismatch:\n{0}'.format('\n'.join(len_strs)))
return lengths.pop()
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError("Column {0} does not exist".format(name))
def add_column(self, col, index=None, name=None, rename_duplicate=False):
"""
Add a new Column object ``col`` to the table. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
Parameters
----------
col : Column
Column object to add.
index : int or `None`
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
Uniquify column name if it already exists. Default is False.
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create a third column 'c' and append it to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> col_d = Column(name='d', data=['a', 'b', 'c'])
>>> t.add_column(col_d, 1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
3 c 0.3 z
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> t.add_column(col_b, rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.2
3 0.3 1.3
Add an unnamed column or mixin object to the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(data=['x', 'y'])
>>> t.add_column(col_c)
>>> t.add_column(col_c, name='c')
>>> col_b = Column(name='b', data=[1.1, 1.2])
>>> t.add_column(col_b, name='d')
>>> print(t)
a b col2 c d
--- --- ---- --- ---
1 0.1 x x 1.1
2 0.2 y y 1.2
To add several columns use add_columns.
"""
if index is None:
index = len(self.columns)
if name is not None:
name = (name,)
self.add_columns([col], [index], name, rename_duplicate=rename_duplicate)
def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False):
"""
Add a list of new Column objects ``cols`` to the table. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
Parameters
----------
cols : list of Columns
Column objects to add.
indexes : list of ints or `None`
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create column 'c' and 'd' and append them to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
3 0.3 z w
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d], [0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
z 3 w 0.3
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_columns([col_b, col_c], rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
3 0.3 1.3 z
Add unnamed columns or mixin objects to the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_a = Column(data=['x', 'y'])
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([col_a, col_b])
>>> t.add_columns([col_a, col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
x u x u
y v y v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError('Number of indexes must match number of cols')
if copy:
cols = [col_copy(col) for col in cols]
if len(self.columns) == 0:
# No existing table data, init from cols
newcols = cols
else:
newcols = list(self.columns.values())
new_indexes = list(range(len(newcols) + 1))
for col, index in zip(cols, indexes):
i = new_indexes.index(index)
new_indexes.insert(i, None)
newcols.insert(i, col)
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError('Number of names must match number of cols')
for i, (col, name) in enumerate(zip(cols, names)):
if name is None:
if col.info.name is not None:
continue
name = 'col{}'.format(i + len(self.columns))
if col.info.parent_table is not None:
col = col_copy(col)
col.info.name = name
if rename_duplicate:
existing_names = set(self.colnames)
for col in cols:
i = 1
orig_name = col.info.name
while col.info.name in existing_names:
# If the column belongs to another table then copy it
# before renaming
if col.info.parent_table is not None:
col = col_copy(col)
new_name = '{0}_{1}'.format(orig_name, i)
col.info.name = new_name
i += 1
existing_names.add(new_name)
self._init_from_cols(newcols)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
if 'refcount' in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if 'always' in warns:
warnings.warn("replaced column '{}'".format(name),
TableReplaceWarning, stacklevel=3)
if 'slice' in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = ("replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if 'refcount' in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = ("replaced column '{}' and the number of references "
"to the column changed.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if 'attributes' in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = ("replaced column '{}' and column attributes {} changed."
.format(name, changed_attrs))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col):
"""
Replace column ``name`` with the new ``col`` object.
Parameters
----------
name : str
Name of column to replace
col : column object (list, ndarray, Column, etc)
New column object to replace the existing column
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError('column name {0} is not in the table'.format(name))
if self[name].info.indices:
raise ValueError('cannot replace a table index column')
t = self.__class__([col], names=[name])
cols = OrderedDict(self.columns)
cols[name] = t[name]
self._init_from_cols(cols.values())
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (six.integer_types, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice, int, or array of ints
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=np.bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
'''
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same result as using remove_column.
'''
if isinstance(names, six.string_types):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
for name in names:
self.columns.pop(name)
def _convert_string_dtype(self, in_kind, out_kind, python3_only):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
python3_only : bool
Only do this operation for Python 3
"""
if python3_only and six.PY2:
return
# If there are no `in_kind` columns then do nothing
cols = self.columns.values()
if not any(col.dtype.kind == in_kind for col in cols):
return
newcols = []
for col in cols:
if col.dtype.kind == in_kind:
newdtype = re.sub(in_kind, out_kind, col.dtype.str)
newcol = col.__class__(col, dtype=newdtype)
else:
newcol = col
newcols.append(newcol)
self._init_from_cols(newcols)
def convert_bytestring_to_unicode(self, python3_only=False):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U') assuming
ASCII encoding.
Internally this changes string columns to represent each character in the string
with a 4-byte UCS-4 equivalent, so it is inefficient for memory but allows Python
3 scripts to manipulate string arrays with natural syntax.
The ``python3_only`` parameter is provided as a convenience so that code can
be written in a Python 2 / 3 compatible way::
>>> t = Table.read('my_data.fits')
>>> t.convert_bytestring_to_unicode(python3_only=True)
Parameters
----------
python3_only : bool
Only do this operation for Python 3
"""
self._convert_string_dtype('S', 'U', python3_only)
def convert_unicode_to_bytestring(self, python3_only=False):
"""
Convert ASCII-only unicode columns (dtype.kind='U') to bytestring (dtype.kind='S').
When exporting a unicode string array to a file in Python 3, it may be desirable
to encode unicode columns as bytestrings. This routine takes advantage of numpy
automated conversion which works for strings that are pure ASCII.
The ``python3_only`` parameter is provided as a convenience so that code can
be written in a Python 2 / 3 compatible way::
>>> t.convert_unicode_to_bytestring(python3_only=True)
>>> t.write('my_data.fits')
Parameters
----------
python3_only : bool
Only do this operation for Python 3
"""
self._convert_string_dtype('U', 'S', python3_only)
def keep_columns(self, names):
'''
Keep only the columns specified (remove the others).
Parameters
----------
names : list
A list containing the names of the columns to keep. All other
columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Specifying only a single column name keeps only this column.
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
        Specifying a list of column names is also possible.
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
'''
if isinstance(names, six.string_types):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
remove = list(set(self.keys()) - set(names))
self.remove_columns(remove)
def rename_column(self, name, new_name):
'''
Rename a column.
        This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
'''
if name not in self.keys():
raise KeyError("Column {0} does not exist".format(name))
self.columns[name].info.name = new_name
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
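
        Examples
        --------
        Create a table with two columns 'a' and 'b', then insert a new row
        before index 1 (the values used here are purely illustrative)::

            >>> t = Table([[1, 2], [4, 5]], names=('a', 'b'))
            >>> t.insert_row(1, [3, 6])

        Column 'a' then holds ``[1, 3, 2]`` and column 'b' holds ``[4, 6, 5]``.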
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError("Index {0} is out of bounds for table with length {1}"
.format(index, N))
if index < 0:
index += N
def _is_mapping(obj):
"""Minimal checker for mapping (dict-like) interface for obj"""
attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items')
return all(hasattr(obj, attr) for attr in attrs)
if mask is not None and not self.masked:
# Possibly issue upgrade warning and update self.ColumnClass. This
# does not change the existing columns.
self._set_masked(True)
if _is_mapping(vals) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not _is_mapping(mask):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError('keys in mask should match keys in vals')
if vals and any(name not in colnames for name in vals):
raise ValueError('Keys in vals must all be valid column names')
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, 'dtype'):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError("Value must be supplied for column '{0}'".format(name))
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or _is_mapping(mask)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError('Mismatch between number of vals and columns')
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError('Mismatch between number of masks and columns')
else:
mask = [False] * len(self.columns)
else:
raise TypeError('Vals must be an iterable or mapping or None')
columns = self.TableColumns()
try:
# Insert val at index for each column
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
# If the new row caused a change in self.ColumnClass then
# Column-based classes need to be converted first. This is
# typical for adding a row with mask values to an unmasked table.
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col = self.ColumnClass(col, copy=False)
newcol = col.insert(index, val, axis=0)
if not isinstance(newcol, BaseColumn):
newcol.info.name = name
if self.masked:
newcol.mask = FalseArray(newcol.shape)
if len(newcol) != N + 1:
raise ValueError('Incorrect length for column {0} after inserting {1}'
' (expected {2}, got {3})'
                                     .format(name, val, N + 1, len(newcol)))
newcol.info.parent_table = self
# Set mask if needed
if self.masked:
newcol.mask[index] = mask_
columns[name] = newcol
# insert row in indices
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
except Exception as err:
raise ValueError("Unable to insert row because of exception in column '{0}':\n{1}"
.format(name, err))
else:
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def argsort(self, keys=None, kind=None):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
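
        Examples
        --------
        Get the sort order of a small table, ordering first by 'b' and then
        by 'a' (an illustrative example)::

            >>> t = Table([[3, 1, 2], [6, 5, 5]], names=('a', 'b'))
            >>> list(t.argsort(['b', 'a']))
            [1, 2, 0]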
"""
if isinstance(keys, six.string_types):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, self[keys])
if index is not None:
return index.sorted_data()
kwargs = {}
if keys:
kwargs['order'] = keys
if kind:
kwargs['kind'] = kind
if keys:
data = self[keys].as_array()
else:
data = self.as_array()
return data.argsort(**kwargs)
def sort(self, keys=None):
'''
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name','firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, six.string_types):
keys = [keys]
indexes = self.argsort(keys)
sort_index = get_index(self, self[keys])
if sort_index is not None:
# avoid inefficient relabelling of sorted index
prev_frozen = sort_index._frozen
sort_index._frozen = True
for col in self.columns.values():
col[:] = col.take(indexes, axis=0)
if sort_index is not None:
# undo index freeze
sort_index._frozen = prev_frozen
# now relabel the sort index appropriately
sort_index.sort()
def reverse(self):
'''
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
for col in self.columns.values():
col[:] = col[::-1]
for index in self.indices:
index.reverse()
@classmethod
def read(cls, *args, **kwargs):
"""
Read and parse a data table and return as a Table.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily reading a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table.read('table.dat', format='ascii')
>>> events = Table.read('events.fits', format='fits')
The arguments and keywords (other than ``format``) provided to this function are
passed through to the underlying data reader (e.g. `~astropy.io.ascii.read`).
"""
return io_registry.read(cls, *args, **kwargs)
def write(self, *args, **kwargs):
"""
Write this Table object out in the specified format.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily writing a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table([[1, 2], [3, 4]], names=('a', 'b'))
>>> dat.write('table.dat', format='ascii')
The arguments and keywords (other than ``format``) provided to this function are
        passed through to the underlying data writer (e.g. `~astropy.io.ascii.write`).
"""
io_registry.write(self, *args, **kwargs)
def copy(self, copy_data=True):
'''
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array
.. note::
The ``meta`` is always deepcopied regardless of the value for
``copy_data``.
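
        Examples
        --------
        Make an independent copy of a table, or a copy that shares the same
        data array (illustrative)::

            >>> t = Table([[1, 2, 3]], names=('a',))
            >>> t2 = t.copy()                 # data is copied
            >>> t3 = t.copy(copy_data=False)  # data array is shared with t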
'''
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, '_groups'):
out._groups = groups.TableGroups(out, indices=self._groups._indices,
keys=self._groups._keys)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
if six.PY2:
raise TypeError("unorderable types: Table() < {0}".
format(str(type(other))))
else:
return super(Table, self).__lt__(other)
def __gt__(self, other):
if six.PY2:
raise TypeError("unorderable types: Table() > {0}".
format(str(type(other))))
else:
return super(Table, self).__gt__(other)
def __le__(self, other):
if six.PY2:
raise TypeError("unorderable types: Table() <= {0}".
format(str(type(other))))
else:
return super(Table, self).__le__(other)
def __ge__(self, other):
if six.PY2:
raise TypeError("unorderable types: Table() >= {0}".
format(str(type(other))))
else:
return super(Table, self).__ge__(other)
def __eq__(self, other):
if isinstance(other, Table):
other = other.as_array()
if self.masked:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def __ne__(self, other):
return ~self.__eq__(other)
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`TableGroups` which contains a copy of this table but sorted by row
according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `Table`
Key grouping object
Returns
-------
out : `Table`
New table with groups set
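
        Examples
        --------
        Group a small table on the values of one of its columns
        (illustrative)::

            >>> t = Table([['a', 'b', 'a', 'b'], [1, 2, 3, 4]], names=('key', 'val'))
            >>> tg = t.group_by('key')
            >>> len(tg.groups)
            2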
"""
if self.has_mixin_columns:
raise NotImplementedError('group_by not available for tables with mixin columns')
return groups.table_group_by(self, keys)
def to_pandas(self):
"""
Return a :class:`pandas.DataFrame` instance
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table contains mixin or multi-dimensional columns
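
        Examples
        --------
        Assuming pandas is installed, a simple table converts directly::

            t = Table([[1, 2, 3], ['x', 'y', 'z']], names=('a', 'b'))
            df = t.to_pandas()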
"""
from pandas import DataFrame
if self.has_mixin_columns:
raise ValueError("Cannot convert a table with mixin columns to a pandas DataFrame")
if any(getattr(col, 'ndim', 1) > 1 for col in self.columns.values()):
raise ValueError("Cannot convert a table with multi-dimensional columns to a pandas DataFrame")
out = OrderedDict()
for name, column in self.columns.items():
if isinstance(column, MaskedColumn):
if column.dtype.kind in ['i', 'u']:
out[name] = column.astype(float).filled(np.nan)
elif column.dtype.kind in ['f', 'c']:
out[name] = column.filled(np.nan)
else:
out[name] = column.astype(np.object).filled(np.nan)
else:
out[name] = column
if out[name].dtype.byteorder not in ('=', '|'):
out[name] = out[name].byteswap().newbyteorder()
return DataFrame(out)
@classmethod
def from_pandas(cls, dataframe):
"""
Create a `Table` from a :class:`pandas.DataFrame` instance
Parameters
----------
dataframe : :class:`pandas.DataFrame`
The pandas :class:`pandas.DataFrame` instance
Returns
-------
table : `Table`
A `Table` (or subclass) instance
"""
out = OrderedDict()
for name in dataframe.columns:
column = dataframe[name]
mask = np.array(column.isnull())
data = np.array(column)
if data.dtype.kind == 'O':
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = six.string_types
if not six.PY2:
string_types += (bytes,)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b''
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask)
else:
out[name] = Column(data=data, name=name)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`QTable` provides a class for heterogeneous tabular data which can be
easily modified, for instance adding columns or new rows.
The `QTable` class is identical to `Table` except that columns with an
associated ``unit`` attribute are converted to `~astropy.units.Quantity`
objects.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
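
    Examples
    --------
    A column with a ``unit`` attribute is converted to a
    `~astropy.units.Quantity` (illustrative)::

        >>> import astropy.units as u
        >>> from astropy.table import QTable, Column
        >>> t = QTable([Column([1.0, 2.0], unit='m')], names=('a',))
        >>> isinstance(t['a'], u.Quantity)
        True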
"""
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if (isinstance(col, Column) and getattr(col, 'unit', None) is not None):
# We need to turn the column into a quantity, or a subclass
# identified in the unit (such as u.mag()).
q_cls = getattr(col.unit, '_quantity_class', Quantity)
qcol = q_cls(col.data, col.unit, copy=False)
qcol.info = col.info
col = qcol
else:
col = super(QTable, self)._convert_col_for_table(col)
return col
class NdarrayMixin(np.ndarray):
"""
Mixin column class to allow storage of arbitrary numpy
ndarrays within a Table. This is a subclass of numpy.ndarray
and has the same initialization options as ndarray().
"""
info = ParentDtypeInfo()
def __new__(cls, obj, *args, **kwargs):
self = np.array(obj, *args, **kwargs).view(cls)
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
return self
def __array_finalize__(self, obj):
if obj is None:
return
if six.callable(super(NdarrayMixin, self).__array_finalize__):
super(NdarrayMixin, self).__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super(NdarrayMixin, self).__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle NdarrayMixin objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super(NdarrayMixin, self).__setstate__(nd_state)
self.__dict__.update(own_state)
| bsd-3-clause |
ProkopHapala/ProbeParticleModel | pyProbeParticle/PPPlot.py | 1 | 6520 | #!/usr/bin/python
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
#params = None
# =========== defaults
default_figsize = (8,8)
default_cmap = 'gray'
default_interpolation = 'bicubic'
default_atom_size = 0.10
# =========== Utils
def plotBonds( xyz, bonds ):
for b in bonds:
i=b[0]; j=b[1]
plt.arrow(xyz[1][i], xyz[2][i], xyz[1][j]-xyz[1][i], xyz[2][j]-xyz[2][i], head_width=0.0, head_length=0.0, fc='k', ec='k', lw= 1.0,ls='solid' )
def plotAtoms( atoms, atomSize=default_atom_size, edge=True, ec='k', color='w' ):
plt.fig = plt.gcf()
es = atoms[0]
xs = atoms[1]
ys = atoms[2]
if len( atoms ) > 4:
colors = atoms[4]
else:
colors = [ color ]*100
for i in range(len(atoms[1])):
        # Named colors (the default 'w') are used as-is; RGB tuples are converted to hex.
        fc = colors[i] if isinstance(colors[i], str) else '#%02x%02x%02x' % tuple(colors[i])
if not edge:
ec=fc
circle=plt.Circle( ( xs[i], ys[i] ), atomSize, fc=fc, ec=ec )
plt.fig.gca().add_artist(circle)
def plotGeom( atoms=None, bonds=None, atomSize=default_atom_size ):
if (bonds is not None) and (atoms is not None):
plotBonds( atoms, bonds )
if atoms is not None:
plotAtoms( atoms, atomSize=atomSize )
def colorize_XY2RG( Xs, Ys ):
r = np.sqrt(Xs**2 + Ys**2)
vmax = r[5:-5,5:-5].max()
Red = 0.5*Xs/vmax + 0.5
Green = 0.5*Ys/vmax + 0.5
c = np.array( (Red, Green, 0.5*np.ones(np.shape(Red)) ) ) # --> array of (3,n,m) shape, but need (n,m,3)
c = c.swapaxes(0,2)
c = c.swapaxes(0,1)
return c, vmax
# =========== plotting functions
def plotImages(
prefix, F, slices,
extent=None, zs = None, figsize=default_figsize,
cmap=default_cmap, interpolation=default_interpolation, vmin=None, vmax=None, cbar=False,
atoms=None, bonds=None, atomSize=default_atom_size
):
for ii,i in enumerate(slices):
print(" plotting ", i)
plt.figure( figsize=figsize )
plt.imshow( F[i], origin='lower', interpolation=interpolation, cmap=cmap, extent=extent, vmin=vmin, vmax=vmax )
if cbar:
plt.colorbar();
plotGeom( atoms, bonds, atomSize=atomSize )
plt.xlabel(r' Tip_x $\AA$')
plt.ylabel(r' Tip_y $\AA$')
if zs is None:
plt.title( r"iz = %i" %i )
else:
plt.title( r"Tip_z = %2.2f $\AA$" %zs[i] )
plt.savefig( prefix+'_%3.3i.png' %i, bbox_inches='tight' )
plt.close()
def plotVecFieldRG(
prefix, dXs, dYs, slices,
extent=None, zs = None, figsize=default_figsize,
interpolation=default_interpolation,
atoms=None, bonds=None, atomSize=default_atom_size
):
for ii,i in enumerate(slices):
print(" plotting ", i)
        plt.figure( figsize=figsize )
HSBs,vmax = colorize_XY2RG(dXs[i],dYs[i])
plt.imshow( HSBs, extent=extent, origin='lower', interpolation=interpolation )
plotGeom( atoms, bonds, atomSize=atomSize )
plt.xlabel(r' Tip_x $\AA$')
plt.ylabel(r' Tip_y $\AA$')
if zs is None:
plt.title( r"iz = %i" %i )
else:
plt.title( r"Tip_z = %2.2f $\AA$" %zs[i] )
plt.savefig( prefix+'_%3.3i.png' %i, bbox_inches='tight' )
plt.close()
def plotDistortions(
prefix, X, Y, slices, BG=None, by=2,
extent=None, zs = None, figsize=default_figsize,
cmap=default_cmap, interpolation=default_interpolation, vmin=None, vmax=None, cbar=False, markersize=1.0,
atoms=None, bonds=None, atomSize=default_atom_size
):
for ii,i in enumerate(slices):
print(" plotting ", i)
plt.figure( figsize=figsize )
plt.plot ( X[i,::by,::by].flat, Y[i,::by,::by].flat, 'r.', markersize=markersize )
if BG is not None:
plt.imshow( BG[i,:,:], origin='lower', interpolation=interpolation, cmap=cmap, extent=extent, vmin=vmin, vmax=vmax )
if cbar:
plt.colorbar()
plotGeom( atoms, bonds, atomSize=atomSize )
plt.xlabel(r' Tip_x $\AA$')
plt.ylabel(r' Tip_y $\AA$')
if zs is None:
plt.title( r"iz = %i" %i )
else:
plt.title( r"Tip_z = %2.2f $\AA$" %zs[i] )
plt.savefig( prefix+'_%3.3i.png' %i, bbox_inches='tight' )
plt.close()
def plotArrows(
# not yet tested
prefix, dX, dY, X, Y, slices, BG=None, C=None,
extent=None, zs = None, by=2, figsize=default_figsize,
cmap=default_cmap, interpolation=default_interpolation, vmin=None, vmax=None, cbar=False,
atoms=None, bonds=None, atomSize=default_atom_size
):
for ii,i in enumerate(slices):
print(" plotting ", i)
plt.figure( figsize=figsize )
#plt.plt.quiver( dX, dY, X, Y, C=C, width=width, scale=scale )
        plt.quiver( X[::by,::by], Y[::by,::by], dX[::by,::by], dY[::by,::by], color = 'k', headlength=10, headwidth=10, scale=15 )
if BG is not None:
plt.imshow ( BG[i,:,:], origin='lower', interpolation=interpolation, cmap=cmap, extent=extent, vmin=vmin, vmax=vmax )
if cbar:
plt.colorbar()
plotGeom( atoms, bonds, atomSize=atomSize )
plt.xlabel(r' Tip_x $\AA$')
plt.ylabel(r' Tip_y $\AA$')
if zs is None:
plt.title( r"iz = %i" %i )
else:
plt.title( r"Tip_z = %2.2f $\AA$" %zs[i] )
plt.savefig( prefix+'_%3.3i.png' %i, bbox_inches='tight' )
plt.close()
# ================
def makeCmap_Blue1( vals=( 0.25, 0.5, 0.75 ) ):
cdict = { 'red': ( (0.0, 1.0, 1.0), (vals[0], 1.0, 1.0), (vals[1], 1.0, 1.0), (vals[2], 0.0, 0.0), (1.0, 0.0, 0.0) ),
'green': ( (0.0, 0.0, 0.0), (vals[0], 1.0, 1.0), (vals[1], 1.0, 1.0), (vals[2], 1.0, 1.0), (1.0, 0.0, 0.0) ),
'blue': ( (0.0, 0.0, 0.0), (vals[0], 0.0, 0.0), (vals[1], 1.0, 1.0), (vals[2], 1.0, 1.0), (1.0, 1.0, 1.0) ) }
return LinearSegmentedColormap('BlueRed1', cdict)
def makeCmap_Blue2( vals=( 0.25, 0.5, 0.75 ) ):
cdict = { 'red': ( (0.0, 1.0, 1.0), (vals[0], 1.0, 1.0), (vals[1], 0.0, 0.0), (vals[2], 0.0, 0.0), (1.0, 0.0, 0.0) ),
'green': ( (0.0, 1.0, 1.0), (vals[0], 0.0, 0.0), (vals[1], 0.0, 0.0), (vals[2], 0.0, 0.0), (1.0, 1.0, 1.0) ),
'blue': ( (0.0, 0.0, 0.0), (vals[0], 0.0, 0.0), (vals[1], 0.0, 0.0), (vals[2], 1.0, 1.0), (1.0, 1.0, 1.0) ) }
return LinearSegmentedColormap('BlueRed1', cdict)
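
# =========== demo
if __name__ == "__main__":
    # Minimal self-contained demo (illustrative only): render a few slices of a
    # synthetic 3D scalar field to demo_*.png with plotImages(). No input files,
    # atoms or bonds are assumed.
    zs = np.linspace( 5.0, 8.0, 4 )
    xs, ys = np.meshgrid( np.linspace(-5.0, 5.0, 128), np.linspace(-5.0, 5.0, 128) )
    F  = np.array( [ np.exp( -(xs**2 + ys**2)/(1.0 + iz) ) for iz in range(len(zs)) ] )
    plotImages( "demo", F, slices=range(len(zs)), zs=zs, extent=(-5.0, 5.0, -5.0, 5.0), cbar=True )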
| mit |
hitszxp/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 167 | 1659 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
SystemsGenetics/KINC | bin/kinc-3d-viewer.py | 1 | 63581 | #!/usr/bin/env python3
"""
Creates a Dash application that provides 3D visualization of a KINC network.
For usage instructions run this script with the --help flag.
"""
import argparse
import numpy as np
import pandas as pd
import igraph as ig
import plotly as py
import seaborn as sns
import plotly.graph_objects as go
from fa2 import ForceAtlas2
import random
import dash
import dash_core_components as dcc
import dash_html_components as html
import os
import json
import re
import ast
import time
import base64
from progress.bar import IncrementalBar
import socket
def load_network(file_path):
"""
Imports the KINC-generated network file (either full or Tidy versions).
file_path : The path to the network file.
return : A pandas dataframe containing the network.
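    Example usage (the file name here is illustrative)::

        net = load_network('my_network.coexpnet.tidy.txt')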
"""
net = pd.read_csv(file_path, sep="\t")
# Make sure the file has the required columns
columns = net.columns
if ('Source' not in columns) | ('Target' not in columns) | ('Samples' not in columns) | ('p_value' not in columns) | ('r_squared' not in columns) |('Test_Name' not in columns):
print("ERROR: The network file does not seem to be KINC tidy file. It is missing one or more of the following column headers: Source, Target, Samples, p_value, r_squared or Test_Name. Please check the file.")
exit(1)
return net
def load_gem(file_path):
"""
    Imports the tab-delimited Gene Expression Matrix (GEM).
    GEM files can be generated from RNA-seq data using GEMmaker. Alternatively,
    this can be a metabolite abundance matrix.
file_path : The path to the GEM file. The file should be log2 transformed.
return : A pandas dataframe containing the GEM.
"""
gem = pd.read_csv(file_path, sep="\t")
return gem
def load_amx(file_path, sample_col = 'Sample'):
"""
Imports the tab-delimited annotation matrix (amx).
The matrix must have at least one column that contains a unique list of
sample names.
file_path : The path to the annotation matrix.
sample_col : The name of the column that contains the sample names. Defaults
to 'Sample'
return : A pandas dataframe containing the annotation matrix.
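    Example usage (the file name and column are illustrative)::

        amx = load_amx('sample_annotations.txt', sample_col='Sample')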
"""
amx = pd.read_csv(file_path, sep="\t")
amx.index = amx[sample_col]
return amx
def load_node_meta(file_path):
"""
Imports the tab-delimited node metadata file.
    The file must have 4 columns: the first containing the node name, the
    second a controlled vocabulary term ID, the third the term definition
    and the fourth the vocabulary name.
    file_path : The path to the node metadata file.
    return : A pandas dataframe containing the node metadata.
"""
nmeta = pd.read_csv(file_path, sep="\t")
nmeta.columns = ['Node', 'Term', 'Definition', 'Vocabulary']
nmeta.index = nmeta['Node']
return nmeta
def get_iGraph(net):
"""
Converts the KINC network dataframe into an iGraph object.
    Igraph objects are handy for performing network statistics such as
transitivity and degree calculations.
net : The network dataframe created by the load_network function.
return : An igraph object of the network loaded with the source, target and
Similarity_Score (as the weight)
"""
g = ig.Graph()
# Add the nodes
v = pd.concat([net['Source'], net['Target']]).unique()
g.add_vertices(v)
# Add the edges
g.add_edges(net[['Source', 'Target']].values)
    # Add the edge weights (currently disabled)
#g.es['weight'] = net['Similarity_Score']
return g
def calculate_2d_layout(net, net_prefix, redo_layout, iterations):
"""
Calculates a typical 2D layout for the network.
The first time this function is called on a network it may take some time
depending on the size of the network. The layout is saved in a file with
the same name as the network but with a '.glayout.txt' extension in
the working directory. On subsequent runs of this program that file is
imported if it exists.
net : The network dataframe created by the load_network function.
net_prefix : The filename of the file that will house the layout
after it is calculated. The file will be saved with this name
and the extension ".2Dlayout.txt"
    redo_layout : A boolean indicating if the layout should be rebuilt rather
than loading from file if one exists already.
return : a Pandas dataframe containing the layout coordinates for
the nodes in the network. The dataframe contains X, and Y
             dimensional coordinates.
"""
g = get_iGraph(net)
g.simplify()
t = pd.Series(g.transitivity_local_undirected(), index=g.vs['name'])
d = pd.DataFrame(g.degree(), index=g.vs['name'], columns=['Degree'])
forceatlas2 = ForceAtlas2(
# Behavior alternatives
outboundAttractionDistribution=True, # Dissuade hubs
linLogMode=False, # NOT IMPLEMENTED
adjustSizes=False, # Prevent overlap (NOT IMPLEMENTED)
edgeWeightInfluence=1.0,
# Performance
jitterTolerance=1.0, # Tolerance
barnesHutOptimize=True,
barnesHutTheta=1.2,
multiThreaded=False, # NOT IMPLEMENTED
# Tuning
scalingRatio=2.0,
strongGravityMode=False,
gravity=1,
# Log
verbose=True)
if (redo_layout | (not os.path.exists(net_prefix + '.2Dlayout.txt'))):
print("Calculating 2D layout.")
glayout = pd.DataFrame(forceatlas2.forceatlas2_igraph_layout(g, iterations=iterations).coords)
glayout.columns = ['X', 'Y']
glayout.index = g.vs['name']
glayout = pd.concat([glayout, d, t], axis=1, sort=False)
glayout.columns = ['X', 'Y', 'Degree', 'CC']
glayout.to_csv(net_prefix + '.2Dlayout.txt')
else:
glayout = pd.read_csv(net_prefix + '.2Dlayout.txt', index_col=0)
return glayout
def bin_edges(net):
"""
Calculates a set of bins using the Similarity score and P-value.
It is from these bins that the edges and nodes of the network will be
stacked in the z-axis of the 3D plot and or colored. Four new
columns are added to the provided network: 'Edge_Bin', 'Pval_Bin',
'Rsqr_Bin' and 'Relationship'.
net : The network dataframe created by the load_network function.
"""
net['Edge_Bin'] = np.around(np.abs(net['Similarity_Score']), decimals=2)
net['Pval_Bin'] = np.round(-np.log10(net['p_value']))
if 'hotelling_p_value' in net.columns:
net['HPval_Bin'] = np.round(-np.log10(net['hotelling_p_value']))
if (net['r_squared'].dtype == 'object'):
net['Rsqr_Bin'] = 0
else:
net['Rsqr_Bin'] = np.around(net['r_squared'], decimals=1)
net['Relationship'] = np.ceil(net['Similarity_Score']).astype('str')
net['Relationship'] = net['Relationship'].replace("-0.0", 'Negative')
net['Relationship'] = net['Relationship'].replace("1.0", 'Positive')
def get_vertex_zlayers(net, glayout, net_prefix, redo_layout):
"""
Uses the 2D layout and calculates the Z-coordinate for the nodes.
net : The network dataframe created by the load_network function.
glayout : The dataframe containing the 2D layout of the nodes.
net_prefix: The filename of the file that will house the vertex layout
after it is calculated. The file will be saved with this name
and the extension ".3Dvlayers.txt"
    redo_layout : A boolean indicating if the layout should be rebuilt rather
than loading from file if one exists already.
return : A Pandas dataframe containing the X, Y and Z coordinates for the
nodes as well as the Degree and CC (clustering coefficient) for
each node.
"""
def find_vlayers(row, vtype='Source', bar=None):
if bar:
bar.next()
node = glayout.loc[row[vtype]]
ebin = row['Edge_Bin']
pbin = row['Pval_Bin']
hpbin = np.nan
if ('HPval_Bin' in row.index):
hpbin = row['HPval_Bin']
rbin = row['Rsqr_Bin']
rel = row['Relationship']
test = row['Test_Name']
return(row[vtype], node['X'], node['Y'], ebin, pbin, hpbin, rbin, rel, test, node['Degree'], node['CC'])
if (redo_layout | (not os.path.exists(net_prefix + '.3Dvlayers.txt'))):
print("Calculating 3D vertex layout.")
bar = IncrementalBar('', max=net.shape[0]*2, suffix='%(percent)d%%')
lsource = net.apply(find_vlayers, vtype='Source', bar=bar, axis=1)
ltarget = net.apply(find_vlayers, vtype='Target', bar=bar, axis=1)
print("")
columns = ['Vertex', 'X', 'Y', 'EBin', 'PBin', 'HPBin', 'RBin', 'Rel', 'Test_Name', 'Degree', 'CC']
vlayers = pd.DataFrame.from_records(lsource.append(ltarget).values, columns=columns)
vlayers = vlayers[vlayers.duplicated() == False]
# We want to place the node in the layer where it first appears.
vlayers = vlayers.groupby(by=['Vertex']).apply(lambda g: g[g['EBin'] == g['EBin'].max()])
vlayers.reset_index(inplace=True, drop=True)
vlayers.to_csv(net_prefix + '.3Dvlayers.txt')
else:
vlayers = pd.read_csv(net_prefix + '.3Dvlayers.txt', index_col=0)
return vlayers
def get_edge_zlayers(net, glayout, net_prefix, redo_layout):
"""
Uses the 2D layout and calculates the Z-coordinate for the edges.
Edges are drawn as lines in the 3D scatterplot, therefore this function
calculates the start and stop coordinates for the edges in the format
required by the scatter3d viewer.
net : The network dataframe created by the load_network function.
glayout : The dataframe containing the 2D layout of the nodes.
net_prefix: The filename of the file that will house the vertex layout
after it is calculated. The file will be saved with this name
and the extension ".3Delayers.txt"
    redo_layout : A boolean indicating if the layout should be rebuilt rather
than loading from file if one exists already.
return : A Pandas dataframe containing the X, Y and Z coordinates arrays
for the edges as well as Source, Target and Samples values from
             the original network. The X, Y and Z coordinates are tuples.
"""
def place_elayers(row, bar = None):
if bar:
bar.next()
ebin = row['Edge_Bin']
pbin = row['Pval_Bin']
hpbin = np.nan
if ('HPval_Bin' in row.index):
hpbin = row['HPval_Bin']
rbin = row['Rsqr_Bin']
rel = row['Relationship']
test = row['Test_Name']
source = glayout.loc[row["Source"]]
target = glayout.loc[row["Target"]]
return([[source['X'], target['X'], None],
[source['Y'], target['Y'], None],
row["Source"],
row["Target"],
row["Samples"],
ebin, pbin, hpbin, rbin, rel, test])
if (redo_layout | (not os.path.exists(net_prefix + '.3Delayers.txt'))):
print("Calculating 3D edge layout.")
bar = IncrementalBar('', max=net.shape[0], suffix='%(percent)d%%')
ledge = net.apply(place_elayers, bar=bar, axis=1)
print("")
elayers = pd.DataFrame.from_records(ledge, columns=['X', 'Y', 'Source', 'Target', 'Samples', 'EBin', 'PBin', 'HPBin', 'RBin', 'Rel', 'Test_Name'])
elayers['name'] = elayers['Source'] + " (co) " + elayers['Target']
elayers.to_csv(net_prefix + '.3Delayers.txt')
else:
elayers = pd.read_csv(net_prefix + '.3Delayers.txt', index_col=0)
elayers['X'] = elayers['X'].apply(ast.literal_eval)
elayers['Y'] = elayers['Y'].apply(ast.literal_eval)
return elayers
def create_network_plot(net, vlayers, elayers, color_by = 'Score', layer_by = 'Score',
camera = None, aspect = None):
"""
Uses Plotly to create the interactive 3D visualization of the network.
This function uses the Scatter3D plot to draw the network. The axes are
hidden so it appears as a typical network view. It defaults to
a straight on view as the network would be seen in a typical 2D viewer like
Cytoscape.
net : The network dataframe created by the load_network function.
vlayers : The dataframe containing the 3D coordinates for the nodes.
elayers : The dataframe containing the 3D coordinates for the edges.
camera : A dictionary containing the figure camera coordinates.
return : a Plotly figure object.
"""
    # Default Z-indexes for lines/points to the Score value.
Z = vlayers['EBin']
if layer_by == 'Score':
Z = vlayers['EBin']
if layer_by == 'P-value':
Z = vlayers['PBin']
if layer_by == 'Hotelling P-value (phased)':
Z = vlayers['HPBin']
if layer_by == 'R^2':
Z = vlayers['RBin']
if layer_by == 'Test Name':
Z = vlayers['Test_Name']
if layer_by == 'Relationship':
Z = vlayers['Rel']
# Add the network nodes as the first trace.
fig1 = go.Figure(data=[go.Scatter3d(x=vlayers['X'], y=vlayers['Y'],
z=Z, mode='markers',
opacity = 0.5,
marker=dict(symbol='circle', size=np.log10(vlayers['Degree'])*4,
line=dict(width=1, color="#888888")),
text="Node: " + vlayers['Vertex'],
customdata=vlayers['Vertex'],
hoverinfo='text', name='Nodes')])
# Add the edges and bin them
include_slider = True
if color_by == 'Score':
slider_title = 'Similarity Score'
if color_by == 'P-value':
slider_title = '-log10(p)'
if color_by == 'Hotelling P-value (phased)':
slider_title = '-log10(p)'
if color_by == 'R^2':
slider_title = 'R-squared'
if color_by == 'Test Name':
slider_title = 'Test Name'
include_slider = False
if color_by == 'Relationship':
slider_title = 'Relationship Type'
include_slider = False
layer_title = layer_by
if layer_by == 'P-value':
layer_title = '-log10(p)'
if layer_by == 'Hotelling P-value (phased)':
layer_title = '-log10(p)'
(colorway, sliders, nticks) = create_binned_network_figure(fig1, elayers, color_by,
layer_by, slider_title, include_slider)
fig1.update_layout(
autosize=True,
#title=dict(text = "3D Network View", font = dict(color='#FFFFFF')),
showlegend=True,
legend=dict(font = dict(color="#FFFFFF")),
margin=dict(l=450, r=10, t=10, b=10),
paper_bgcolor="#000000",
colorway=colorway,
scene=dict(
aspectmode="cube",
xaxis=dict(showbackground=False, showline=False, zeroline=False, showgrid=False,
showticklabels=False, title='', showspikes=False),
yaxis=dict(showbackground=False, showline=False, zeroline=False, showgrid=False,
showticklabels=False, title='', showspikes=False),
zaxis=dict(showbackground=False, showline=False, zeroline=False, showgrid=False,
showticklabels=True, tickmode="auto", nticks=nticks, title=layer_title, showspikes=False, color="#FFFFFF")
),
hovermode='closest',
annotations=[dict(showarrow=False, text="", xref='paper', yref='paper',
x=0, y=0.1, xanchor='left', yanchor='bottom', font=dict(size=14))
],
sliders=sliders,
)
# We want an orthographic layout so that when looking above the edges line up
# with the nodes.
fig1.layout.scene.camera.projection.type = "orthographic"
fig1.layout.scene.camera.eye = dict(x=0, y=0, z=2)
if camera:
fig1.layout.scene.camera.eye = camera['eye']
fig1.layout.scene.aspectmode = 'manual'
if aspect:
fig1.layout.scene.aspectratio = aspect
return fig1
def create_binned_network_figure(figure, elayers, color_by = 'Score',
layer_by = 'Score', slider_title = 'Similarity Score', include_slider = True):
"""
Adds the traces for the network figure based on the bin column.
"""
color_col = 'EBin'
    if color_by == 'Score':
color_col = 'EBin'
if color_by == 'P-value':
color_col = 'PBin'
if color_by == 'Hotelling P-value (phased)':
color_col = 'HPBin'
if color_by == 'R^2':
color_col = 'RBin'
if color_by == 'Test Name':
color_col = 'Test_Name'
if color_by == 'Relationship':
color_col = 'Rel'
layer_col = 'EBin'
if layer_by == 'Score':
layer_col = 'EBin'
if layer_by == 'P-value':
layer_col = 'PBin'
if layer_by == 'Hotelling P-value (phased)':
layer_col = 'HPBin'
if layer_by == 'R^2':
layer_col = 'RBin'
if layer_by == 'Test Name':
layer_col = 'Test_Name'
if layer_by == 'Relationship':
layer_col = 'Rel'
# Add edge traces to the figure, one each per bin.
layer_bins = np.flip(np.sort(elayers[layer_col].unique()))
color_bins = np.flip(np.sort(elayers[color_col].unique()))
for bin in color_bins:
if (not type(bin) == str):
if (bin.dtype == "float64") & (np.isnan(bin)):
continue
bin_edges = elayers[elayers[color_col] == bin]
# Reformat the elayers for use by the Scatter3d function.
eX = np.hstack(bin_edges['X'])
eY = np.hstack(bin_edges['Y'])
eZ = np.hstack(bin_edges[layer_col].repeat(3))
names = bin_edges['name'][bin_edges.index.repeat(3)]
# Create the scatterplot containing the lines for edges.
figure.add_trace(go.Scatter3d(x=eX, y=eY, z=eZ,
mode='lines',
line=dict(width=1),
text="Edge: " + names,
hoverinfo='text', name=bin,
customdata=bin_edges.index.repeat(3)))
# Add a slider for the network viewer
if include_slider:
steps = []
steps.append(dict(
method="restyle",
args=["visible", [True] * (len(color_bins) + 2)],
label='all'
))
steps.append(dict(
method="restyle",
args=["visible", [False] * (len(color_bins) + 2)],
label='nodes'
))
steps[1]["args"][1][0] = True
for i in range(len(color_bins)):
step = dict(
method="restyle",
args=["visible", [False] * (len(color_bins) + 2)],
label=color_bins[i]
)
# Turn on the layers for this step and leave on the nodes layer.
step["args"][1][0] = True
for j in range(1,i+2):
step["args"][1][j] = True
# Set the label.
steps.append(step)
colorway = ["#FFFFFF"] + sns.color_palette('viridis_r', color_bins.size).as_hex()
sliders = [dict(
active=0,
currentvalue={"prefix": slider_title + ": "},
pad={"b": 50},
steps=steps,
font=dict(color = '#FFFFFF'),
tickcolor='#FFFFFF',
len=1)]
else:
colorway = ["#FFFFFF"] + sns.color_palette('muted', color_bins.size).as_hex()
sliders = []
nticks = layer_bins.size
if layer_by == 'Score':
nticks = int(nticks / 2)
return (colorway, sliders, nticks)
def create_degree_distribution_plot(vlayers):
"""
Creates a 2D scatterplot containing the degree distribution
"""
vdata = vlayers.loc[:,('Vertex', 'Degree')].drop_duplicates()
vdata = vdata.groupby('Degree').agg(['count']).reset_index()
fig = go.Figure(data=[go.Scatter(
x=vdata['Degree'],
y=vdata['Vertex']['count'],
mode='markers',
marker=dict(symbol='circle', size=5, color='#000088'))])
fig.update_layout(
height=350,
title="Node Degree Distribution",
margin=dict(l=10, r=10, t=80, b=20),
xaxis_type="log",
yaxis_type="log",
xaxis_title="Degree",
yaxis_title="Number of Nodes",
)
return fig
def create_avg_cc_distribution_plot(vlayers):
"""
Creates a 2D scatterplot containing the average clustering coefficient distribution
"""
vdata = vlayers.loc[:,('CC', 'Degree')].drop_duplicates()
vdata = vdata.groupby('Degree').agg(['mean']).reset_index()
fig = go.Figure(data=[go.Scatter(
x=vdata['Degree'],
y=vdata['CC']['mean'],
mode='markers',
marker=dict(symbol='circle', size=5, color='#000088'))])
fig.update_layout(
height=350,
title="Avg. Clusering Coefficient Distribution",
margin=dict(l=10, r=10, t=80, b=10),
xaxis_type="log",
yaxis_type="log",
xaxis_title="Degree",
yaxis_title="Number of Nodes",
)
return fig
def create_expression_scatterplot(gem, amx, elayers, color_col=None, edge_index = None):
"""
    Uses Plotly to create the interactive 3D scatterplot of co-expression data.
This function uses the Scatter3D plot to draw the co-expression scatterplot.
It defaults to a straight on view but can be interactively rotated,
panned, etc.
net : The network dataframe created by the load_network function.
amx : The annotation matrix dataframe created by the load_amx function.
elayers : The dataframe containing the 3D coordinates for the edges.
color_col : The name of the column in the amx that contains the category
that should be used for coloring the points in the plot.
edge_index : The numerical index of the edge in the elayers dataframe
that is to be plotted.
return : a Plotly figure object.
"""
if edge_index is None:
return go.Figure(go.Scatter3d())
node1 = elayers.iloc[edge_index]['Source']
node2 = elayers.iloc[edge_index]['Target']
samples = elayers.iloc[edge_index]['Samples']
# Generate the dataframe for the expression scatterplot
sdata = pd.DataFrame(dict(X=gem.loc[node1].values, Y=gem.loc[node2].values))
sdata.index = gem.columns
sdata = sdata.join(amx, how='left')
# Calculate the sizes of the points.
sizes = pd.Series(list(samples))
sizes = sizes.replace(to_replace=r'[^1]', value='5', regex=True)
sizes = sizes.replace({'1': '10'})
sizes = sizes.astype('int')
sizes.index = sdata.index
# Generate the colors for the samples.
if (color_col == None):
color_col = 'Cluster'
# If the column is 'Cluster' we need to add it to the dataframe. The
# Cluster column simply lists if the sample is in the cluster or not.
if (color_col == 'Cluster'):
inout = pd.Series(list(samples))
inout = inout.replace(to_replace=r'[^1]', value='Out', regex=True)
inout = inout.replace({'1': 'In'})
inout.index = gem.columns
sdata = pd.concat([sdata, inout.rename('Cluster')], 1)
# Is this a categorical column?
is_categorical = False
categories = sdata[color_col].unique()
if (categories.dtype == object):
is_categorical = True
# Now draw the plot
nticks = None
tickmode = 'auto'
ticktext = None
tickvals = None
if is_categorical:
num_categories = categories.shape[0]
tickmode = 'array'
ticktext = categories
tickvals = np.arange(0, num_categories) / (num_categories - 1) - 0.5
replace_df = pd.DataFrame({'Categories' : categories,'Z' : tickvals})
sdata['Z'] = sdata[color_col].replace(
to_replace=replace_df['Categories'].values,
value=replace_df['Z'].values)
nticks = num_categories
showlegend = True
first_category = (sdata[color_col] == categories[0])
fig2 = go.Figure(data=[go.Scatter3d(x=sdata[first_category]['X'],
z=sdata[first_category]['Y'],y=sdata[first_category]['Z'],
mode='markers',
marker=dict(symbol='circle', size=sizes[first_category]),
text= sdata[first_category].index, hoverinfo='text',
name=str(categories[0]))])
for i in range(1, len(categories)):
next_category = (sdata[color_col] == categories[i])
fig2.add_trace(go.Scatter3d(x=sdata[next_category]['X'],
z=sdata[next_category]['Y'], y=sdata[next_category]['Z'],
mode='markers',
marker=dict(symbol='circle',size=sizes[next_category]),
text= sdata[next_category].index,
hoverinfo='text', name=str(categories[i])))
else:
num_categories = None
sdata['Z'] = sdata[color_col]
tickvals = []
showlegend = False
fig2 = go.Figure(data=[go.Scatter3d(x=sdata['X'], z=sdata['Y'], y=sdata['Z'],
mode='markers',
marker=dict(symbol='circle', size=sizes,
color=sdata['Z'], colorscale='Viridis'),
text= sdata.index, hoverinfo='text')])
fig2.update_layout(
height=400,
title="",
showlegend=showlegend,
legend={'itemsizing': 'constant'},
margin=dict(l=10, r=10, t=0, b=10),
scene=dict(
aspectmode="cube",
xaxis=dict(showbackground=True, showline=True, zeroline=True, showgrid=True,
showticklabels=True, title=node1,
showspikes=True),
zaxis=dict(showbackground=True, showline=True, zeroline=True, showgrid=True,
showticklabels=True, title=node2,
showspikes=True),
yaxis=dict(showbackground=True, showline=True, zeroline=True, showgrid=True,
showticklabels=True, title=color_col,
tickmode=tickmode, ticktext=ticktext, tickvals=tickvals, nticks=nticks, showspikes=True),
),
hovermode='closest',
annotations=[dict(showarrow=False,
text="",
xref='paper', yref='paper',
x=0, y=0.1, xanchor='left', yanchor='bottom', font=dict(size=14))
],
datarevision = time.time()
)
fig2.layout.scene.camera.projection.type = "orthographic"
fig2.layout.scene.camera.eye = dict(x=0, y=-1, z=0)
return fig2
def create_network_stats_table(net):
"""
    Constructs the HTML table that holds information about the network.
net : the network data frame.
"""
htr_style = {}
htd_style = {
'text-align' : 'left', 'padding' : '5px',
'margin': '0px', 'padding' : '0 0 0 20',
'width' : '60%', "border-bottom": "1px solid #BBBBBB"}
td_style = {
'text-align' : 'left', 'padding' : '5px',
'margin': '0px', 'padding' : '0 0 0 20', "border-bottom": "1px solid #BBBBBB"
}
div_children = []
table_rows = []
num_edges = net.shape[0]
unique_edges = net.loc[:,('Source', 'Target')].drop_duplicates().shape[0]
num_nodes = len(pd.concat([net['Source'], net['Target']]).unique())
div_children.append(
html.Table(
style = {
"background-color" : 'white', 'color' : 'black',
'margin-top' : '0px', 'width' : '100%',
'margin-bottom' : '0px'
},
children=[
html.Tr([
html.Th('Total Edges', style=htd_style),
html.Td(num_edges, style=td_style)
]),
html.Tr([
html.Th('Unique Edges', style=htd_style),
html.Td(unique_edges, style=td_style)
]),
html.Tr([
html.Th('Number of Nodes', style=htd_style),
html.Td(num_nodes, style=td_style)
])
]
)
)
return html.Div(
id='network-stats-table',
children = div_children,
)
def create_dash_edge_table(net, edge_index = None):
"""
    Constructs the HTML table that holds edge information for the Dash app.
elayers : The dataframe containing the 3D coordinates for the edges.
edge_index : The numerical index of the edge in the elayers dataframe
that is to be plotted.
returns : a Dash html.Table object.
"""
htr_style = {}
htd_style = {
'text-align' : 'left', 'padding' : '5px',
'margin': '0px', 'padding' : '0 0 0 20',
'width' : '30%', "border-bottom": "1px solid #BBBBBB"}
td_style = {
'text-align' : 'left', 'padding' : '5px',
'margin': '0px', 'padding' : '0 0 0 20', "border-bottom": "1px solid #BBBBBB"
}
net_fixed = net.drop(['Samples', 'Edge_Bin', 'Pval_Bin', 'Rsqr_Bin', 'Relationship'], axis=1)
if ('HPval_Bin' in net_fixed.columns):
net_fixed = net_fixed.drop(['HPval_Bin'], axis=1)
for colname in net_fixed.columns:
if ('p_value' in colname):
net_fixed[colname] = net_fixed[colname].apply(np.format_float_scientific, precision=4)
columns = net_fixed.columns
div_children = []
if not edge_index == None:
row_vals = net_fixed.iloc[edge_index]
source = row_vals['Source']
target = row_vals['Target']
div_children.append(html.Label(
'{source} (co) {target}'.format(source = source, target=target),
style = {'padding' : '0px', 'margin' : '0px'}
))
div_children.append(html.Br())
row_vals = net_fixed[(net_fixed['Source'] == source) & (net_fixed['Target'] == target)]
for index, row in row_vals.iterrows():
table_rows = []
for col in columns:
if col == "Source" or col == "Target":
continue
table_rows.append(
html.Tr([
html.Th(col, style=htd_style),
html.Td(row[col], style=td_style)
])
)
div_children.append(
html.Label('Edge #{index}'.format(index = index)))
div_children.append(
html.Table(
style = {
"background-color" : 'white', 'color' : 'black',
'margin-top' : '10px', 'margin-bottom' : '0px',
'width' : '100%',
},
children=table_rows
)
)
else:
div_children.append(
html.Div('To view edge details, click an edge in the network.')
)
return html.Div(
id='edge-table',
children = div_children,
)
def create_dash_sample_table(net, amx, sample = None):
"""
    Constructs the HTML table that holds sample information for the Dash app.
amx : The annotation matrix dataframe created by the load_amx function.
sample : The name of the sample to display
returns : a Dash html.Table object.
"""
htr_style = {}
htd_style = {
'text-align' : 'left', 'padding' : '5px',
'margin': '0px', 'padding' : '0 0 0 20',
'width' : '30%', "border-bottom": "1px solid #BBBBBB"}
td_style = {
'text-align' : 'left', 'padding' : '5px',
'margin': '0px', 'padding' : '0 0 0 20', "border-bottom": "1px solid #BBBBBB"
}
columns = amx.columns
div_children = []
if sample:
div_children.append(html.H4(
children = ['Sample: {sample}'.format(sample = sample)],
style = {'padding' : '0px', 'margin' : '0px'}
))
table_rows = []
row = amx.loc[sample]
for col in columns:
table_rows.append(
html.Tr([
html.Th(col, style=htd_style),
html.Td(row[col], style=td_style)
])
)
div_children.append(
html.Table(
style = {
"background-color" : 'white', 'color' : 'black',
'margin-top' : '10px',
'margin-bottom' : '10px', 'width' : '100%',
},
children=table_rows
)
)
else:
div_children.append(
html.Div('To view sample details, click an edge in the network, then in the edge scatterplot click a sample.')
)
return html.Div(
id='sample-table',
children = div_children
)
def create_dash_node_table(net, nmeta, vlayers, node = None):
"""
Constructs the HTML table that holds node information for the Dash app.
net : The network dataframe created by the load_network function.
nmeta : The dataframe containing the node metadata.
vlayers : The dataframe containing the 3D coordinates for the nodes.
node : The name of the node to display
returns : a Dash html.Table object.
"""
htr_style = {}
htd_style = {
'text-align' : 'left', 'padding' : '5px',
'margin': '0px', 'padding' : '0 0 0 20',
'width' : '30%', "border-bottom": "1px solid #BBBBBB"}
td_style = {
'text-align' : 'left', 'padding' : '5px',
'margin': '0px', 'padding' : '0 0 0 20', "border-bottom": "1px solid #BBBBBB"
}
div_children = []
table_rows = []
if not node is None:
table_rows.append(
html.Tr([
html.Th('Name', style=htd_style),
html.Td(node, style=td_style)
])
)
table_rows.append(
html.Tr([
html.Th('Degree', style=htd_style),
html.Td(vlayers.loc[vlayers['Vertex'] == node, 'Degree'].unique(), style=td_style)
])
)
if not nmeta is None:
columns = nmeta.columns
if not nmeta is None:
rows = nmeta.loc[node]
for index, row in rows.iterrows():
table_rows.append(
html.Tr([
html.Th(
colSpan = 2,
children=[
html.Label(
"{term}".format(term = row['Term']),
style= {'font-weight' : 'bold'}
),
html.Div(
row['Definition'],
style= {'font-weight' : 'normal'})
],
style=htd_style,
)
])
)
else:
div_children.append(
html.Div('There is no additional information about this node.')
)
else:
div_children.append(
html.Div('There are no node meta data provided. Use the --nmeta option to load node data when running this application.')
)
div_children.append(
html.Table(
style = {
"background-color" : 'white', 'color' : 'black',
'margin-top' : '0px',
'margin-bottom' : '0px', 'width' : '100%',
},
children=table_rows
)
)
else:
div_children.append(
html.Div('To view node details, click a node in the network.')
)
return html.Div(
id='node-table',
children = div_children
)
def create_condition_select(amx, sample_col = 'Cluster'):
"""
Creates a Dash select dropdown for selecting the condition to view.
This dropdown is intended to change the 3D co-expression scatterplot.
amx : The annotation matrix dataframe created by the load_amx function.
sample_col : The name of the column in the amx that contains the category
that should be used for coloring the points in the plot.
return : A Dash dcc.Dropdown object.
"""
columns = np.sort(amx.columns.values)
# Holds the list of columns to keep.
keep = []
keep.append('Cluster')
# Exclude any columns with just a single value or any columns with as
# many unique values as there are elements
for col in columns:
if len(amx[col].dropna().unique()) <= 1:
continue
if len(amx[col].dropna().unique()) == amx[col].size:
continue
keep.append(col)
# Build the select element.
select = dcc.Dropdown(
id = 'coexp-condition-select',
style = {'color' : 'black'},
options = [
{'label' : col, 'value' : col} for col in keep
],
value = 'Cluster'
)
return select
def create_edge_color_select(net):
"""
Creates a Dash select dropdown for selecting the network attribute used to color edges.
This dropdown is intended to change the 3D network layout view.
net : The network dataframe created by the load_network function.
return : A Dash dcc.Dropdown object.
"""
options = ['Score']
if 'p_value' in net.columns:
options.append('P-value')
if 'hotelling_p_value' in net.columns:
options.append('Hotelling P-value (phased)')
if 'Test_Name' in net.columns:
options.append('Test Name')
if 'r_squared' in net.columns:
options.append('R^2')
options.append('Relationship')
select = dcc.Dropdown(
id = 'edge-color-select',
style = {
'color' : 'black'
},
options = [
{'label' : col, 'value' : col} for col in options
],
value = 'Score'
)
return select
def create_edge_layer_select(net):
"""
Creates a Dash select dropdown for selecting the network attribute used to layer edges.
This dropdown is intended to change the 3D network layout view.
net : The network dataframe created by the load_network function.
return : A Dash dcc.Dropdown object.
"""
options = ['Score']
if 'p_value' in net.columns:
options.append('P-value')
if 'hotelling_p_value' in net.columns:
options.append('Hotelling P-value (phased)')
if 'Test_Name' in net.columns:
options.append('Test Name')
if 'r_squared' in net.columns:
options.append('R^2')
options.append('Relationship')
select = dcc.Dropdown(
id = 'edge-layer-select',
style = {
'color' : 'black'
},
options = [
{'label' : col, 'value' : col} for col in options
],
value = 'Score'
)
return select
def build_sidebar_box_header(title, id_prefix):
return html.Div(
style = {
'background-color' : '#555555', 'color' : 'white',
'margin': '0px', 'padding':'10px',
"border-radius": "5px"},
children = [
html.H3(
children = [title],
style = {
'float' : 'left',
'padding' : '0px', 'margin' : '0px 0px 0px 0'
}
),
html.Button(
'toggle',
id="{prefix}-toggle".format(prefix=id_prefix),
n_clicks=0,
# src="https://img.icons8.com/officexs/32/000000/open-view.png",
style={
"height" : "20px", "float" : "right",
'padding' : '0px', 'margin' : '0px 0px 0px 0'
}
),
html.Div(style ={'clear' : 'both'})
]
)
def write_to_data_uri(s):
"""
Encodes a string as a base64 data URI.
Use this function to embed JavaScript into the Dash app.
Adapted from the suggestion by user 'mccalluc' found here:
https://community.plotly.com/t/problem-of-linking-local-javascript-file/6955/2
"""
uri = (
('data:;base64,').encode('utf8') +
base64.urlsafe_b64encode(s.encode('utf8'))
).decode("utf-8", "strict")
return uri
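# A minimal usage sketch (the script string is hypothetical):
#   custom_js = write_to_data_uri("console.log('viewer loaded');")
#   # custom_js is now a 'data:;base64,...' string that can be added to the
#   # external_scripts list passed to dash.Dash, as build_application does.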
def build_application(net, gem, amx, nmeta, vlayers, elayers, sample_col,
net_name):
"""
Creates the Dash application.
The Dash application will provide all of the interactive plots, tables and
filters for interactively exploring the network.
net : The network dataframe created by the load_network function.
gem : The GEM dataframe created by the load_gem function.
amx : The annotation matrix dataframe created by the load_amx function.
nmeta : The dataframe containing the node metadata.
vlayers : The dataframe containing the 3D coordinates for the nodes.
elayers : The dataframe containing the 3D coordinates for the edges.
sample_col : The name of the column in the amx that contains the sample
name.
net_name : The name of the network to display.
return : The Dash application object.
"""
sidebar_box_style = {
"float" : "left", "width" : "100%", "color" : "black",
"padding" : "0px", "margin-bottom" : "10px",
"background-color" : "#CCCCCC",
"border-radius": "5px"
}
internal_js = write_to_data_uri("""
""")
external_scripts = [
'https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js',
internal_js,
]
external_stylesheets = [
]
app = dash.Dash(__name__,
external_scripts=external_scripts,
external_stylesheets=external_stylesheets
)
app.scripts.config.serve_locally = False
app.layout = html.Div(
style = {
"padding" : "0px", "background-color" :
"black", "margin" : "0px", "color" : "white",
"width" : "100%", "height" : "100vh"
},
children = [
# Graph Row
html.Div(
style = {
"border" : "0px solid white", "padding" : "15px",
"background-color" : "black", "margin" : "0px",
},
children=[
dcc.Graph(
id = 'network-3dview',
style = {
"height" : "100vh"
},
figure = create_network_plot(net, vlayers, elayers),
config = {
'toImageButtonOptions' : {
'filename': 'kinc_3d_network_view',
'width': 800,
'height': 600,
'format': 'svg',
'scale' : 2
}
}
),
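# Hidden inputs that cache the current camera and aspect-ratio state of
# the 3D network view so the view can be preserved when the figure is
# rebuilt by the update_network_plot callback.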
dcc.Input(
id='current-network-3dview-camera',
type="number",
value=0,
style= {'display' : 'none'}
),
dcc.Input(
id='current-network-3dview-aspect',
type="number",
value=0,
style= {'display' : 'none'}
)
]
),
# Header Row
html.Div(
id = "header",
style={
"position" : "fixed", "left" : "30px", "top" : "20px",
'padding' : '0px', "margin" : "0px",
},
children=[
html.Img(
src="https://raw.githubusercontent.com/SystemsGenetics/KINC/master/docs/images/kinc.png",
style={
"height" : "55px","display" : "inline-block",
"padding" : "0px", "margin" : "0px 10px 0px 10px"}),
html.H1(children="3D Network Explorer",
style={
"display" : "inline-block", "padding" : "10px 0px 0px 0px",
"margin" : "0px", "vertical-align" : "top"}),
html.Div(children="Network name: " + net_name,
style={"padding" : "0px 0px 0px 10px"}),
]
),
# Left Sidebar
html.Div(
style={
"position" : "fixed", "left" : "30px", "top" : "120px",
'padding' : '0px 10px 0px 0px', "margin" : "0px",
"width" : "400px", "height" : "80vh", 'overflow-y': 'auto',
"scrollbar-color" : "dark"
},
children = [
# Edge Color and Layer selection boxes.
html.Div(
id='edge-select-box',
style=sidebar_box_style,
children=[
build_sidebar_box_header("Layout and Colors", 'edge-select-box'),
html.Div(
id='edge-select-box-contents',
style={'margin' : '0px', 'display' : 'none', 'padding' : '10px'},
children = [
html.Label('Color Edges By'),
create_edge_color_select(net),
html.Label('Layer Edges By'),
create_edge_layer_select(net)
]
)
]
),
# Node Details
html.Div(
style=sidebar_box_style,
children=[
build_sidebar_box_header("Node Details", 'node-table-box'),
html.Div(
id="node-table-box-contents",
style={'margin' : '0px', 'visibility' : 'hidden'},
children=[create_dash_node_table(net, nmeta, vlayers)]
),
]
),
# Edge Table
html.Div(
style=sidebar_box_style,
children=[
build_sidebar_box_header("Edge Details", 'edge-table-box'),
html.Div(
id="edge-table-box-contents",
style={'margin' : '0px', 'visibility' : 'hidden'},
children=[create_dash_edge_table(net)]
),
]
),
# 3D Co-Expression scatterplot row
html.Div(
style=sidebar_box_style,
children=[
build_sidebar_box_header("Edge Scatterplot", 'scatterplot-box'),
html.Div(
id='scatterplot-box-contents',
style={'margin' : '0px', 'display' : 'none'},
children = [
html.Div(
style={'padding-bottom' : '10px'},
children=[
html.Label('Color Samples By'),
create_condition_select(amx, sample_col)
],
),
dcc.Graph(
id = 'edge-expression-3dview',
figure = create_expression_scatterplot(gem, amx, elayers),
config = {
'toImageButtonOptions' : {
'filename': 'kinc_3d_expression_scatterplot',
'width': 800,
'height': 600,
'format': 'svg',
'scale' : 1
}
},
),
]
)
]
),
# Sample Details
html.Div(
style=sidebar_box_style,
children=[
build_sidebar_box_header("Sample Details", 'sample-table-box'),
html.Div(
id="sample-table-box-contents",
style={'margin' : '0px', 'visibility' : 'hidden'},
children=[create_dash_sample_table(net, amx)]
),
]
),
# network stats
html.Div(
style=sidebar_box_style,
children=[
build_sidebar_box_header("Network Stats", 'network-stats-box'),
html.Div(
id='network-stats-box-contents',
style={'margin' : '0px', 'padding' : '10px'},
children = [
create_network_stats_table(net),
dcc.Graph(
id = 'degree-distribution-plot',
figure = create_degree_distribution_plot(vlayers),
config = {
'toImageButtonOptions' : {
'filename': 'kinc_3d_degree_distribution',
'width': 800,
'height': 800,
'format': 'svg',
'scale' : 1
}
},
),
dcc.Graph(
id = 'avg-cc-distribution-plot',
figure = create_avg_cc_distribution_plot(vlayers),
config = {
'toImageButtonOptions' : {
'filename': 'kinc_3d_average_cc_distribution',
'width': 800,
'height': 600,
'format': 'svg',
'scale' : 1
}
},
),
]
)
]
),
],
),
dcc.Input(
id='current-expr-camera-coords',
type="number",
value=0,
style= {'display' : 'none'}
)
] # End app layout children
) # End app layout
# Callback when an object in the network plot is clicked.
@app.callback(
[dash.dependencies.Output('edge-expression-3dview', 'figure'),
dash.dependencies.Output('edge-table', 'children'),
dash.dependencies.Output('node-table', 'children')],
[dash.dependencies.Input('network-3dview', 'clickData'),
dash.dependencies.Input('coexp-condition-select', 'value')],
[dash.dependencies.State('edge-expression-3dview', 'figure')])
def set_current_edge(clickData, color_col, figure):
edge_index = None
node = None
if (clickData):
scatterplot = figure
node_table = None
edge_table = None
points = clickData['points']
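# The clicked point's hover text encodes what was clicked: edges are
# expected to read "Edge: <source> (co) <target>" and nodes "Node: <name>".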
efound = re.match(r'^Edge: (.*?) \(co\) (.*?)$', points[0]['text'])
nfound = re.match('^Node: (.*?)$', points[0]['text'])
if (efound):
edge_index = points[0]['customdata']
row_vals = elayers.iloc[edge_index]
source = row_vals['Source']
target = row_vals['Target']
edge_nodes = [source, target]
scatterplot = create_expression_scatterplot(gem, amx, elayers, color_col, edge_index)
edge_table = create_dash_edge_table(net, edge_index)
node_table = create_dash_node_table(net, nmeta, vlayers, None)
if (nfound):
node = edge_index = points[0]['customdata']
node_table = create_dash_node_table(net, nmeta, vlayers, node)
edge_table = create_dash_edge_table(net, None)
return [scatterplot, edge_table, node_table]
raise dash.exceptions.PreventUpdate
@app.callback(
[dash.dependencies.Output('sample-table', 'children')],
[dash.dependencies.Input('edge-expression-3dview', 'clickData')])
def update_sample_table(clickData):
if (clickData):
sample = clickData['points'][0]['text']
return [create_dash_sample_table(net, amx, sample)]
raise dash.exceptions.PreventUpdate
@app.callback(
dash.dependencies.Output('current-network-3dview-camera', 'value'),
[dash.dependencies.Input('network-3dview', 'relayoutData')])
def set_current_camera(relayoutData):
if (relayoutData):
if 'scene.camera' in relayoutData.keys():
camera = json.dumps(relayoutData["scene.camera"])
return camera
raise dash.exceptions.PreventUpdate
@app.callback(
dash.dependencies.Output('current-network-3dview-aspect', 'value'),
[dash.dependencies.Input('network-3dview', 'relayoutData')])
def set_network_aspect(relayoutData):
if (relayoutData):
if 'scene.aspectratio' in relayoutData.keys():
aspect = json.dumps(relayoutData["scene.aspectratio"])
return aspect
raise dash.exceptions.PreventUpdate
@app.callback(
dash.dependencies.Output('network-3dview', 'figure'),
[dash.dependencies.Input('edge-color-select', 'value'),
dash.dependencies.Input('edge-layer-select', 'value')],
[dash.dependencies.State('current-network-3dview-camera', 'value'),
dash.dependencies.State('current-network-3dview-aspect', 'value')]
)
def update_network_plot(color_by, layer_by, camera_vals, aspect_vals):
camera = None
aspect = None
if isinstance(camera_vals, str):
camera = json.loads(camera_vals)
if isinstance(aspect_vals, str):
aspect = json.loads(aspect_vals)
if not camera and not aspect:
raise dash.exceptions.PreventUpdate
return create_network_plot(net, vlayers, elayers, color_by, layer_by, camera, aspect)
@app.callback(
dash.dependencies.Output('edge-select-box-contents', 'style'),
[dash.dependencies.Input('edge-select-box-toggle', 'n_clicks')]
)
def toggle_edge_select_box(toggle):
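# The box contents start hidden, so odd click counts show them and even
# counts (including the initial 0) hide them.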
if (toggle % 2 == 1):
return {'margin' : '0px', 'visibility' : 'visible', 'padding' : '10px'}
else:
return {'margin' : '0px', 'visibility' : 'hidden', 'height' : '0px', 'padding' : '0px'}
@app.callback(
dash.dependencies.Output('scatterplot-box-contents', 'style'),
[dash.dependencies.Input('scatterplot-box-toggle', 'n_clicks')]
)
def toggle_scatterplot_box(toggle):
if (toggle % 2 == 1):
return {'margin' : '0px', 'visibility' : 'visible', 'max-height' : '500px', 'padding' : '10px'}
else:
return {'margin' : '0px', 'visibility' : 'hidden', 'height' : '0px', 'padding' : '0px'}
@app.callback(
dash.dependencies.Output('sample-table-box-contents', 'style'),
[dash.dependencies.Input('sample-table-box-toggle', 'n_clicks')]
)
def toggle_sample_table_box(toggle):
if (toggle % 2 == 1):
return {
'margin' : '0px', 'visibility' : 'visible',
'max-height' : '250px', 'padding' : '10px',
'overflow-y': 'auto',
}
else:
return {'margin' : '0px', 'visibility' : 'hidden', 'height' : '0px', 'padding' : '0px'}
@app.callback(
dash.dependencies.Output('network-stats-box-contents', 'style'),
[dash.dependencies.Input('network-stats-box-toggle', 'n_clicks')]
)
def toggle_network_stats_box(toggle):
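# Unlike the other boxes, the network stats box starts visible, so the
# parity test is inverted: even click counts (including 0) show it.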
if (toggle % 2 == 0):
return {'margin' : '0px', 'visibility' : 'visible', 'padding' : '10px'}
else:
return {'margin' : '0px', 'visibility' : 'hidden', 'height' : '0px', 'padding' : '0px'}
@app.callback(
dash.dependencies.Output('node-table-box-contents', 'style'),
[dash.dependencies.Input('node-table-box-toggle', 'n_clicks')]
)
def toggle_node_table_box(toggle):
if (toggle % 2 == 1):
return {
'margin' : '0px', 'visibility' : 'visible',
'max-height' : '250px', 'padding' : '10px',
'overflow-y': 'auto',
}
else:
return {'margin' : '0px', 'visibility' : 'hidden', 'height' : '0px', 'padding' : '0px'}
@app.callback(
dash.dependencies.Output('edge-table-box-contents', 'style'),
[dash.dependencies.Input('edge-table-box-toggle', 'n_clicks')]
)
def toggle_edge_table_box(toggle):
if (toggle % 2 == 1):
return {
'margin' : '0px', 'visibility' : 'visible',
'max-height' : '250px', 'padding' : '10px',
'overflow-y': 'auto',
}
else:
return {'margin' : '0px', 'visibility' : 'hidden', 'height' : '0px', 'padding' : '0px'}
return app
def is_port_in_use(port):
"""
Checks if a port is already in use.
port: the desired port to use
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(('localhost', port)) == 0
def main():
"""
The main function.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--net', dest='net_path', type=str, required=True, help="(required) The path to the KINC-derived network file")
parser.add_argument('--emx', dest='gem_path', type=str, required=True, help="(required) The path to the log2 transformed Gene Expression Matrix or Metabolite abundance matrix.")
parser.add_argument('--amx', dest='amx_path', type=str, required=True, help="(required) The path to the tab-delimited annotation matrix. The matrix must have at least one column that contains a unique list of sample names.")
parser.add_argument('--sample_col', dest='sample_col', type=str, required=False, default='Sample', help="(optional) The name of the column in the annotation matrix that contains the unique sample names. Defaults to 'Sample'")
parser.add_argument('--nmeta', dest='nmeta', type=str, required=False, help="(optional) The path to a tab-delimited node meta data file. The format of the file must have 4 columns, with the first containing the node name, the second a controlled vocabulary term ID, the third the term definition and the fourth the vocabulary name.")
parser.add_argument('--debug', dest='debug', action='store_true', default=False, help="(optional). Add this argument to enable Dash application debugging mode.")
parser.add_argument('--redo-layout', dest='redo_layout', action='store_true', default=False, help=" (optional). If the 2D and 3D network layout has already been constructed it will be loaded from a file. Add this argument to force the layouts to be rebuilt and not loaded from the files. To prevent Dash from rerunning the layout on callbacks, this option results in the program terminating. To view the application, restart without this option.")
parser.add_argument('--iterations', dest='iterations', type=int, default=100, help="(optional). The number of iterations to perform when calculating the Force Atlas2 layout. This argument is only used the first time a network is viewed or if the --redo-layout argument is provided.")
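# Example invocation (a sketch; the script and file names are hypothetical):
#   python kinc_3d_viewer.py --net network.kinc.txt --emx GEM.log2.txt \
#       --amx annotations.txt --sample_col Sample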
args = parser.parse_args()
# Make sure the paths exist
if not os.path.exists(args.net_path):
print ("ERROR: The network file cannot be found: {}".format(args.net_path))
exit(1)
if not os.path.exists(args.gem_path):
print ("ERROR: The expression matrix file cannot be found: {}".format(args.gem_path))
exit(1)
if not os.path.exists(args.amx_path):
print ("ERROR: The annotation matrix file cannot be found: {}".format(args.amx_path))
exit(1)
if args.nmeta is not None:
if not os.path.exists(args.nmeta):
print ("ERROR: The node metadata file cannot be found: {}".format(args.nmeta))
exit(1)
# Load the input data.
print("Reading network file...")
net = load_network(args.net_path)
print("Reading GEM file...")
gem = load_gem(args.gem_path)
print("Reading experioment annotation file...")
amx = load_amx(args.amx_path, args.sample_col)
nmeta = None
if args.nmeta is not None:
print("Reading the node metadata file...")
nmeta = load_node_meta(args.nmeta)
# Get the filename of the network file minus the extension.
(net_prefix, net_ext) = os.path.splitext(os.path.basename(args.net_path))
# Calculate a 2D layout for the network
glayout = calculate_2d_layout(net, net_prefix, args.redo_layout, args.iterations)
# Calculate the Z-coordinate positions for the vertices and edges.
bin_edges(net)
vlayers = get_vertex_zlayers(net, glayout, net_prefix, args.redo_layout)
elayers = get_edge_zlayers(net, glayout, net_prefix, args.redo_layout)
# If the user requested we rebuild the layout then terminate so Dash
# doesn't try to rebuild the layout on each callback.
if args.redo_layout:
print ("Layouts have been built. Please relaunch without the --redo-layout option to view the app.")
exit(0)
# Launch the dash application
print("Launching application...")
app = build_application(net, gem, amx, nmeta, vlayers, elayers, args.sample_col, net_prefix)
port = 8050
while(is_port_in_use(port)):
port = port + 1
app.run_server(debug=args.debug, port=port)
exit(0)
if __name__ == "__main__":
main()
| mit |
massmutual/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 31 | 50760 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (alpha * (t + t0)) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
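# prob is a view into the second column of prob2, so the in-place clip
# and rescaling below fill the positive-class probabilities directly.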
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
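# Illustrative usage (a minimal sketch; the parameter values below are
# arbitrary examples, not recommendations):
#
#   reg = SGDRegressor(penalty='elasticnet', l1_ratio=0.15, alpha=1e-4,
#                      learning_rate='invscaling', eta0=0.01, power_t=0.25)
#   reg.fit(X, y)
#
# With learning_rate='invscaling' the step size decays as eta = eta0 / t**power_t,
# so later samples move the weights less than early ones; 'constant' keeps
# eta = eta0 and 'optimal' uses eta = 1.0 / (alpha * t), as described above.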
| bsd-3-clause |
hijiangtao/statePrediction | util/dbscanPOI.py | 1 | 3028 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import os
import numpy as np
from sklearn.cluster import DBSCAN
class DBScanPOI(object):
def __init__(self, PROP):
super(DBScanPOI, self).__init__()
self.INPUT_DIRECTORY = PROP['IDIRECTORY']
self.OUTPUT_PATH = os.path.join(PROP['ODIRECTORY'], 'clusterPOI')
self.PIDLngLatList = {}
self.msNum = PROP['clusterNum']
self.msFile = PROP['msFile']
self.msOptSubFix = PROP['msOptSubFix']
self.dbscanBaseNum = 0
self.PIDList = [[] for x in xrange(0, PROP['clusterNum'])] # used to identify PIDs and aggregate the results
self.PClusterVec = [[] for x in xrange(0, PROP['clusterNum'])] # used for clustering
self.PClusterRes = []
def run(self, eps, min_samples):
ipoifile = os.path.join(self.INPUT_DIRECTORY, 'baseData', 'mongoUTF8.csv')
imsfile = os.path.join(self.INPUT_DIRECTORY, 'clusterPOI', self.msFile)
self.constructPOILngLatList(ipoifile)
self.constructPOIMatrix(imsfile)
self.dbscanProcess(eps, min_samples)
self.outputToFile("_eps_%f_sam_%d" % (eps, min_samples))
def constructPOILngLatList(self, file):
with open(file, 'rb') as f:
for line in f:
line = line.strip('\n')
linelist = line.split(',')
pid = linelist[0]
lng = float(linelist[5])
lat = float(linelist[6])
self.PIDLngLatList[pid] = [lng, lat]
f.close()
def constructPOIMatrix(self, file):
with open(file, 'rb') as f:
for line in f:
line = line.strip('\n')
linelist = line.split(',')
pid = linelist[0]
cid = int(linelist[1])
self.PIDList[cid].append(line)
# print cid, pid
self.PClusterVec[cid].append(self.PIDLngLatList[pid])
f.close()
def dbscanProcess(self, eps, min_samples):
# ######################
# Compute DBSCAN
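# In DBSCAN, `eps` is the neighbourhood radius and `min_samples` is the
# minimum number of neighbours required for a point to be a core point;
# points assigned to no cluster get the label -1 (noise), which is why the
# loop below leaves -1 labels untouched when shifting cluster ids.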
for x in xrange(0, self.msNum):
X = self.PClusterVec[x]
db = DBSCAN(eps=eps, min_samples=min_samples).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
A = np.array(self.PIDList[x])[:, np.newaxis]
index = 0
while index < len(labels):
if labels[index] != -1:
labels[index] += self.dbscanBaseNum
index += 1
C = np.array(labels)[:, np.newaxis]
res = np.hstack((A, C))
res = ["%s,%s" % (e[0], e[1]) for e in res]
self.PClusterRes += res
# print "PIDList [0]: %s, res [0]: %s" % (self.PIDList[x][0], res[0])
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
self.dbscanBaseNum += n_clusters_
print "MS No.%d, DS Cluster number: %d" % (x, n_clusters_)
print "number of dbscan clusters in all: %d" % (self.dbscanBaseNum)
def outputToFile(self, dsOptSubFix):
"""
General-purpose helper that writes the clustering results to a file.
:param dsOptSubFix: suffix appended to the output file name
"""
res = self.PClusterRes
ostream = '\n'.join(res)
fileName = 'dbscanResult%s%s' % (self.msOptSubFix, dsOptSubFix)
ofile = os.path.join(self.OUTPUT_PATH, fileName)
with open(ofile, 'wb') as f:
f.write(ostream)
f.close()
| apache-2.0 |
btabibian/scikit-learn | examples/neural_networks/plot_mlp_alpha.py | 47 | 4159 | """
================================================
Varying regularization in Multi-layer Perceptron
================================================
A comparison of different values for regularization parameter 'alpha' on
synthetic datasets. The plot shows that different alphas yield different
decision functions.
Alpha is a parameter for regularization term, aka penalty term, that combats
overfitting by constraining the size of the weights. Increasing alpha may fix
high variance (a sign of overfitting) by encouraging smaller weights, resulting
in a decision boundary plot that appears with lesser curvatures.
Similarly, decreasing alpha may fix high bias (a sign of underfitting) by
encouraging larger weights, potentially resulting in a more complicated
decision boundary.
"""
print(__doc__)
# Author: Issam H. Laradji
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
h = .02 # step size in the mesh
alphas = np.logspace(-5, 3, 5)
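# np.logspace(-5, 3, 5) gives five values evenly spaced in log10:
# 1e-05, 1e-03, 1e-01, 1e+01 and 1e+03, so the classifiers below span
# very weak to very strong regularization.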
names = []
for i in alphas:
names.append('alpha ' + str(i))
classifiers = []
for i in alphas:
classifiers.append(MLPClassifier(alpha=i, random_state=1))
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=0, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable]
figure = plt.figure(figsize=(17, 9))
i = 1
# iterate over datasets
for X, y in datasets:
# preprocess dataset, split into training and test part
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='black', s=25)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6, edgecolors='black', s=25)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
CopyChat/Plotting | Downscaling/downscaling.AFR.rsds.py | 1 | 25504 | #!/usr/bin/env python
########################################
#Global map for tests
# from Rabea Amther
########################################
# http://gfesuite.noaa.gov/developer/netCDFPythonInterface.html
import math
import numpy as np
import pylab as pl
import Scientific.IO.NetCDF as IO
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.lines as lines
from mpl_toolkits.basemap import Basemap , addcyclic
from matplotlib.colors import LinearSegmentedColormap
import textwrap
pl.close('all')
ObsRef=1
########################## for CMIP5 charactors
VARIABLE='rsds'
PRODUCT='Amon'
AbsTemp=273.15
RefTemp=5
CERESmean=225.02 #2001-2010
TargetModel=[\
#'CCSM4',\
#'CESM1-BGC',\
#'CESM1-CAM5',\
#'CESM1-FASTCHEM',\
#'CESM1-WACCM',\
#'CNRM-CM5',\
#'CSIRO-Mk3-6-0',\
#'CanESM2',\
'EC-EARTH',\
#'GFDL-ESM2G',\
#'GFDL-ESM2M',\
#'GISS-E2-H',\
#'GISS-E2-R-CC',\
#'HadGEM2-AO',\
#'HadGEM2-CC',\
#'HadGEM2-ES',\
#'IPSL-CM5A-LR',\
#'IPSL-CM5A-MR',\
#'MIROC-ESM-CHEM',\
#'MIROC-ESM',\
#'MIROC5',\
#'MPI-ESM-LR',\
#'MPI-ESM-MR',\
#'MPI-ESM-P',\
#'MRI-CGCM3',\
#'NorESM1-ME',\
#'bcc-csm1-1-m',\
#'bcc-csm1-1',\
#'inmcm4',\
]
GCMsCol=[\
'Magenta',\
'Snow',\
]
COLORtar=[\
'yellow','Darkturquoise','lime','deeppink',\
'red','darkmagenta','navy',\
'deeppink','orange','orangered','yellow','gold','brown','chocolate',\
'green','yellowgreen','aqua','olive','teal','blue',\
'purple','darkmagenta','fuchsia','indigo',\
'dimgray','black','navy']
COLORall=['yellow','Darkturquoise','Lime','deeppink',\
'orangered','blue','green','pink','gold',\
'lime','lightcyan','orchid','yellow','lightsalmon',\
'brown','khaki','aquamarine','yellowgreen','blueviolet',\
'snow','skyblue','slateblue','orangered','dimgray',\
'chocolate','teal','mediumvioletred','gray','cadetblue',\
'mediumorchid','bisque','tomato','hotpink','firebrick',\
'Chartreuse','purple','goldenrod',\
'black','orangered','cyan','magenta']
linestyles=['-', '-', '-', '-', '-',\
'-', '--','--','--', '--',\
'_', '_','_','_',\
'_', '_','_','_',\
'_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':','_', '-', '--', ':']
RCMsPlotIndex=[\
5,\
6,\
7,\
8,\
]
RCMsPlotLabels=[\
'EC-EARTH + CCLM4',\
'EC-EARTH + RCA4',\
'EC-EARTH + RACMO22T',\
'EC-EARTH + HIRHAM5',\
]
RCMsHist=[\
'rsds_AFR-44_CCCma-CanESM2_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_CNRM-CERFACS-CNRM-CM5_historical_r1i1p1_CLMcom-CCLM4-8-17_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_CNRM-CERFACS-CNRM-CM5_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_CSIRO-QCCCE-CSIRO-Mk3-6-0_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_ICHEC-EC-EARTH_historical_r12i1p1_CLMcom-CCLM4-8-17_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_ICHEC-EC-EARTH_historical_r12i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_ICHEC-EC-EARTH_historical_r1i1p1_KNMI-RACMO22T_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_ICHEC-EC-EARTH_historical_r3i1p1_DMI-HIRHAM5_v2_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_IPSL-IPSL-CM5A-MR_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_MIROC-MIROC5_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_MOHC-HadGEM2-ES_historical_r1i1p1_CLMcom-CCLM4-8-17_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_MOHC-HadGEM2-ES_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_MPI-M-MPI-ESM-LR_historical_r1i1p1_CLMcom-CCLM4-8-17_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_MPI-M-MPI-ESM-LR_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_NCC-NorESM1-M_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
'rsds_AFR-44_NOAA-GFDL-GFDL-ESM2M_historical_r1i1p1_SMHI-RCA4_v1_196001-200512.ymean.fldmean.nc',\
]
RCMsRCP85=[\
'rsds_AFR-44_CCCma-CanESM2_rcp85_r1i1p1_SMHI-RCA4_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_CNRM-CERFACS-CNRM-CM5_rcp85_r1i1p1_CLMcom-CCLM4-8-17_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_CNRM-CERFACS-CNRM-CM5_rcp85_r1i1p1_SMHI-RCA4_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_CSIRO-QCCCE-CSIRO-Mk3-6-0_rcp85_r1i1p1_SMHI-RCA4_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_ICHEC-EC-EARTH_rcp85_r12i1p1_CLMcom-CCLM4-8-17_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_ICHEC-EC-EARTH_rcp85_r12i1p1_SMHI-RCA4_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_ICHEC-EC-EARTH_rcp85_r1i1p1_KNMI-RACMO22T_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_ICHEC-EC-EARTH_rcp85_r3i1p1_DMI-HIRHAM5_v2_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_IPSL-IPSL-CM5A-MR_rcp85_r1i1p1_SMHI-RCA4_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_MIROC-MIROC5_rcp85_r1i1p1_SMHI-RCA4_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_MOHC-HadGEM2-ES_rcp85_r1i1p1_CLMcom-CCLM4-8-17_v1_mon_200601-210012.ymean.fldmean.nc',\
#'rsds_AFR-44_MOHC-HadGEM2-ES_rcp85_r1i1p1_KNMI-RACMO22T_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_MOHC-HadGEM2-ES_rcp85_r1i1p1_SMHI-RCA4_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_MPI-M-MPI-ESM-LR_rcp85_r1i1p1_CLMcom-CCLM4-8-17_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_MPI-M-MPI-ESM-LR_rcp85_r1i1p1_SMHI-RCA4_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_NCC-NorESM1-M_rcp85_r1i1p1_SMHI-RCA4_v1_mon_200601-210012.ymean.fldmean.nc',\
'rsds_AFR-44_NOAA-GFDL-GFDL-ESM2M_rcp85_r1i1p1_SMHI-RCA4_v1_mon_200601-210012.ymean.fldmean.nc',\
]
GCMsRCP85=[\
'ACCESS1-0',\
'ACCESS1-3',\
'BNU-ESM',\
'CCSM4',\
'CESM1-BGC',\
'CESM1-CAM5',\
'CMCC-CESM',\
'CMCC-CMS',\
'CMCC-CM',\
'CNRM-CM5',\
'CSIRO-Mk3-6-0',\
'CanESM2',\
'EC-EARTH',\
'FIO-ESM',\
'GFDL-CM3',\
'GFDL-ESM2G',\
'GFDL-ESM2M',\
'GISS-E2-H-CC',\
'GISS-E2-H',\
'GISS-E2-R-CC',\
'GISS-E2-R',\
'HadGEM2-AO',\
'HadGEM2-ES',\
'IPSL-CM5A-LR',\
'IPSL-CM5A-LR',\
'IPSL-CM5A-MR',\
'IPSL-CM5B-LR',\
'MIROC-ESM-CHEM',\
'MIROC-ESM',\
'MIROC5',\
'MPI-ESM-LR',\
'MPI-ESM-MR',\
'MRI-CGCM3',\
'NorESM1-ME',\
'NorESM1-M',\
'bcc-csm1-1',\
'inmcm4',\
]
#================================================ CMIP5 models
# for historical
GCMsHist=[\
'ACCESS1-0',\
'ACCESS1-3',\
'BNU-ESM',\
'CCSM4',\
'CESM1-BGC',\
'CESM1-CAM5',\
'CESM1-FASTCHEM',\
'CESM1-WACCM',\
'CMCC-CESM',\
'CNRM-CM5',\
'CNRM-CM5',\
'CSIRO-Mk3-6-0',\
'CanESM2',\
'EC-EARTH',\
'FIO-ESM',\
'GFDL-ESM2M',\
'GISS-E2-H',\
'HadGEM2-AO',\
'HadGEM2-ES',\
'IPSL-CM5A-LR',\
'IPSL-CM5A-MR',\
'MIROC-ESM-CHEM',\
'MIROC-ESM',\
'MIROC5',\
'MPI-ESM-LR',\
'MPI-ESM-MR',\
'MPI-ESM-P',\
'MRI-CGCM3',\
'MRI-ESM1',\
'NorESM1-ME',\
'NorESM1-M',\
'bcc-csm1-1-m',\
'bcc-csm1-1',\
'inmcm4',\
]
EnsembleHist=[\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r3i1p1',\
'r1i1p1',\
'r1i1p1',\
'r12i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r2i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
]
EnsembleRCP85=[\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r2i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r2i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
]
#=================================================== define the Plot:
fig1=plt.figure(figsize=(16,9))
ax = fig1.add_subplot(111)
plt.xlabel('Year',fontsize=16)
plt.ylabel('Surface Downwelling SW radiation change (W m-2)',fontsize=16)
plt.title("Surface Downwelling SW radiation change (W m-2) in AFRICA simulated by CMIP5 models",fontsize=18)
if ObsRef==1:
plt.ylim(-60,60)
else:
plt.ylim(-10,10)
plt.xlim(1960,2099)
plt.grid()
#plt.xticks(np.arange(1960, 2100+10, 20))
#plt.xticks(1960,1980,2000,2020,2040,2060,2080,2099)
plt.xticks([1960,1980,2000,2020,2040,2060,2080,2099])
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
# vertical at 2005
plt.axvline(x=2005.5,linewidth=2, color='gray')
plt.axhline(y=0,linewidth=2, color='gray')
########################## for hist:
########################## for hist:
#============================ for CORDEX
#============================ for CORDEX
EXPERIMENT='CORDEX'
DirCordexHist='/Users/tang/climate/CORDEX/hist/AFRICA/'
YEAR=range(1960,2006)
SumTemp=np.zeros(len(YEAR))
K=0
print "========== for",EXPERIMENT," ==============="
for infile0 in RCMsHist:
infile1=DirCordexHist+infile0
K=K+1 # for average
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
#TEMP=range(0,Nmonth,12)
#for j in range(0,Nmonth,12):
#TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
print " temp ======================== absolut"
TEMP=range(0,len(YEAR))
for j in range(0,len(YEAR)):
TEMP[j]=np.mean(TAS[j][:][:])
#print TEMP
if ObsRef==1:
RefTemp=CERESmean
else:
# reference temp: mean of 2001-2005
RefTemp=np.mean(TEMP[len(YEAR)-10+1:len(YEAR)])
if K==1:
ArrRefTemp=[RefTemp]
else:
ArrRefTemp=ArrRefTemp+[RefTemp]
print 'ArrRefTemp ========== ',ArrRefTemp
TEMP=[t-RefTemp for t in TEMP]
#print " temp ======================== relative to mean of 1986-2005"
#print TEMP
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
#print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
print 'NO. of timesteps on TEMP:',len(TEMP)
#plot only target models
if K in RCMsPlotIndex:
plt.plot(YEAR,TEMP,\
#linestyles[TargetModel.index(Model)],\
color=COLORtar[RCMsPlotIndex.index(K)],linewidth=2)
print "color is",COLORtar[RCMsPlotIndex.index(K)]
print "k is in the RCMsPlotIndex"
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
#print ArrTemp
print "StdTemp ========================:"
#print StdTemp
# 5-95% range ( +-1.64 STD)
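# (for a normal distribution, +/-1.64 standard deviations around the mean
# covers roughly the central 90%, i.e. the 5th to 95th percentile)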
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print RCMsHist
plt.plot(YEAR,AveTemp,label=" CORDEX mean", color="blue",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='blue',alpha=0.3)
if ObsRef==0:
# draw NO. of model used:
plt.text(1980,-8,'CORDEX Hist model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
#))
)
else:
plt.text(1980,-35,'CORDEX Hist model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
#))
)
#=================================================== for CORDEX RCP85
#=================================================== for CORDEX RCP85
DirCordexRcp85='/Users/tang/climate/CORDEX/rcp85/AFRICA/'
YEAR=range(2006,2101)
SumTemp=np.zeros(len(YEAR))
K=0
print "========== for",EXPERIMENT," ==============="
for infile0 in RCMsRCP85:
infile1=DirCordexRcp85+infile0
K=K+1 # for average
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
#TEMP=range(0,Nmonth,12)
#for j in range(0,Nmonth,12):
#TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
print " temp ======================== absolut"
TEMP=range(0,len(YEAR))
for j in range(0,len(YEAR)):
TEMP[j]=np.mean(TAS[j][:][:])
#print TEMP
if ObsRef==1:
RefTemp=CERESmean
else:
# reference temp: mean of 1996-2005
# get the reftemp if the model has historical data here
print 'ArrRefTemp in HIST ensembles:',np.shape(ArrRefTemp)
print ArrRefTemp
print 'model index in HIST: ',RCMsRCP85.index(infile0)
RefTemp=ArrRefTemp[RCMsRCP85.index(infile0)]
print 'RefTemp from HIST: ',RefTemp
TEMP=[t-RefTemp for t in TEMP]
#print " temp ======================== relative to mean of 1986-2005"
#print TEMP
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
#print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
print 'NO. of timesteps on TEMP:',len(TEMP)
#plot only target models
if K in RCMsPlotIndex:
plt.plot(YEAR,TEMP,label=RCMsPlotLabels[RCMsPlotIndex.index(K)],\
#linestyles[TargetModel.index(Model)],\
color=COLORtar[RCMsPlotIndex.index(K)],linewidth=2)
print "color is",COLORtar[RCMsPlotIndex.index(K)]
print "k is in the RCMsPlotIndex"
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print RCMsRCP85
plt.plot(YEAR,AveTemp,color="blue",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='blue',alpha=0.3)
if ObsRef==0:
# draw NO. of model used:
plt.text(2030,-8,'CORDEX RCP8.5 model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
#))
)
else:
plt.text(2030,-35,'CORDEX RCP8.5 model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
#))
)
#=================================================== for CMIP5 hist
#=================================================== for CMIP5 hist
#=================================================== for CMIP5 hist
#=================================================== for CMIP5 hist
#=================================================== for CMIP5 hist
#=================================================== for CMIP5 hist
DirCMIP5Hist='/Users/tang/climate/CMIP5/hist/AFRICA'
TAILhist='_196001-200512.ymean.fldmean.AFR.nc'
EXPERIMENT='historical'
YEAR=range(1960,2006)
SumTemp=np.zeros(len(YEAR))
K=0
print "========== for",EXPERIMENT," ==============="
for Model in GCMsHist:
K=K+1 # for average
infile1=DirCMIP5Hist+'/'\
+VARIABLE+'_'+PRODUCT+'_'+Model+'_'+EXPERIMENT+'_'+EnsembleHist[GCMsHist.index(Model)]+TAILhist
#clt_Amon_MPI-ESM-LR_historical_r1i1p1_196001-200512.fldmean.AFR.nc
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
#TEMP=range(0,Nmonth,12)
#for j in range(0,Nmonth,12):
#TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
print " temp ======================== absolut"
TEMP=range(0,len(YEAR))
for j in range(0,len(YEAR)):
TEMP[j]=np.mean(TAS[j][:][:])
#print TEMP
if ObsRef==1:
RefTemp=CERESmean
else:
# reference temp: mean of 1996-2005
RefTemp=np.mean(TEMP[len(YEAR)-10+1:len(YEAR)])
if K==1:
ArrRefTemp=[RefTemp]
else:
ArrRefTemp=ArrRefTemp+[RefTemp]
print 'ArrRefTemp ========== ',ArrRefTemp
TEMP=[t-RefTemp for t in TEMP]
#print " temp ======================== relative to mean of 1986-2005"
#print TEMP
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
#print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
print 'NO. of timesteps on TEMP:',len(TEMP)
#plot only target models
if Model in TargetModel:
plt.plot(YEAR,TEMP,\
#linestyles[TargetModel.index(Model)],\
color=GCMsCol[1],linewidth=4)
print "color is",GCMsCol[0]
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print GCMsHist
plt.plot(YEAR,AveTemp,label=' CMIP5 mean',color="black",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='black',alpha=0.3)
if ObsRef==0:
# draw NO. of model used:
plt.text(1980,-6,'CMIP5 Hist model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
#))
)
else:
plt.text(1980,-30,'CMIP5 Hist model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
#))
)
#=================================================== for CMIP5 rcp8.5:
#=================================================== for CMIP5 rcp8.5:
#=================================================== for CMIP5 rcp8.5:
#=================================================== for CMIP5 rcp8.5:
#=================================================== for CMIP5 rcp8.5:
DirCMIP5RCP85='/Users/tang/climate/CMIP5/rcp85/AFRICA/'
EXPERIMENT='rcp85'
TailRcp85='_200601-210012.ymean.fldmean.AFR.nc'
YEAR=range(2006,2101)
Nmonth=1140
SumTemp=np.zeros(len(YEAR))
K=0
print "========== for",EXPERIMENT," ==============="
for Model in GCMsRCP85:
K=K+1 # for average
infile1=DirCMIP5RCP85+'/'\
+VARIABLE+'_'+PRODUCT+'_'+Model+'_'+EXPERIMENT+'_'+EnsembleRCP85[K-1]+TailRcp85
#clt_Amon_MPI-ESM-LR_historical_r1i1p1_196001-200512.fldmean.AFR.nc
print('the file is == ' +infile1)
#open input files
infile=IO.NetCDFFile(infile1,'r')
# read the variable tas
TAS=infile.variables[VARIABLE][:,:,:].copy()
#print 'the variable tas ===============: '
#print TAS
# calculate the annual mean temp:
#TEMP=range(0,Nmonth,12)
#for j in range(0,Nmonth,12):
#TEMP[j/12]=np.mean(TAS[j:j+11][:][:])-AbsTemp
TEMP=range(0,len(YEAR))
for j in range(0,len(YEAR)):
TEMP[j]=np.mean(TAS[j][:][:])
print " temp ======================== absolut"
print TEMP
if ObsRef==1:
RefTemp=CERESmean
else:
# reference temp: mean of 1996-2005
# get the reftemp if the model has historical data here
print 'ArrRefTemp in HIST ensembles:',np.shape(ArrRefTemp)
print ArrRefTemp
if Model in GCMsHist:
print 'model index in HIST: ',GCMsHist.index(Model)
print 'K=',K
RefTemp=ArrRefTemp[GCMsHist.index(Model)]
print 'RefTemp from HIST: ',RefTemp
else:
RefTemp=np.mean(TEMP[0:9])
print 'RefTemp from RCP8.5: ',RefTemp
TEMP=[t-RefTemp for t in TEMP]
print " temp ======================== relative to mean of 1986-2005"
print TEMP
# get array of temp K*TimeStep
if K==1:
ArrTemp=[TEMP]
else:
ArrTemp=ArrTemp+[TEMP]
SumTemp=SumTemp+TEMP
#print SumTemp
#=================================================== to plot
print "======== to plot =========="
print len(TEMP)
print 'NO. of year:',len(YEAR)
print 'NO. of timesteps on TEMP:',len(TEMP)
#plot only target models
if Model in TargetModel:
plt.plot(YEAR,TEMP,label=Model,\
#linestyles[TargetModel.index(Model)],\
color=GCMsCol[1],linewidth=4)
print "color is",GCMsCol[0]
#=================================================== for ensemble mean
AveTemp=[e/K for e in SumTemp]
ArrTemp=list(np.array(ArrTemp))
print 'shape of ArrTemp:',np.shape(ArrTemp)
StdTemp=np.std(np.array(ArrTemp),axis=0)
print 'shape of StdTemp:',np.shape(StdTemp)
print "ArrTemp ========================:"
print ArrTemp
print "StdTemp ========================:"
print StdTemp
# 5-95% range ( +-1.64 STD)
StdTemp1=[AveTemp[i]+StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
StdTemp2=[AveTemp[i]-StdTemp[i]*1.64 for i in range(0,len(StdTemp))]
print "Model number for historical is :",K
print "models for historical:";print GCMsHist
plt.plot(YEAR,AveTemp,color="black",linewidth=4)
plt.plot(YEAR,StdTemp1,color="black",linewidth=0.1)
plt.plot(YEAR,StdTemp2,color="black",linewidth=0.1)
plt.fill_between(YEAR,StdTemp1,StdTemp2,color='black',alpha=0.3)
if ObsRef==0:
# draw NO. of model used:
plt.text(2030,-6,'CMIP5 RCP8.5 model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
#))
)
else:
plt.text(2030,-30,'CMIP5 RCP8.5 model: '+str(K),size=16,rotation=0.,
ha="center",va="center",
#bbox = dict(boxstyle="round",
#ec=(1., 0.5, 0.5),
#fc=(1., 0.8, 0.8),
#))
)
plt.legend(loc=2)
plt.show()
quit()
| gpl-3.0 |
lcharleux/compmod | doc/sandbox/laurent/ring_compression_opti_new.py | 1 | 5818 | # SOME OPTIMIZATION WITH RING COMPRESSION
from abapy.materials import Hollomon
from compmod.models import RingCompression
from scipy import interpolate
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import numpy as np
import pickle, copy
import platform
#FIXED PAREMETERS
settings = {}
settings['file_name'] = 'test_expD2.txt'
settings['inner_radius'], settings['outer_radius'] = 45.2 , 48.26
settings['Nt'], settings['Nr'], settings['Na'] = 10, 4, 2  # coarse mesh; a finer alternative is 100, 10, 5
settings['Ne'] = settings['Nt']*settings['Nr']*settings['Na']
settings['displacement'] = 45.
settings['nFrames'] = 100
settings['E'] = 71413.
settings['nu'] = .3
settings['iteration'] = 2  # alternative value: 1
settings['thickness'] = 14.92
workdir = "workdir/"
label = "ringCompression_opti"
elType = "CPS4"
cpus = 1
node = platform.node()
if node == 'lcharleux': abqlauncher = '/opt/Abaqus/6.9/Commands/abaqus' # Ludovic
if node == 'serv2-ms-symme': abqlauncher = '/opt/abaqus/Commands/abaqus' # Linux
if node == 'epua-pd47':
abqlauncher = 'C:/SIMULIA/Abaqus/6.11-2/exec/abq6112.exe' # Local machine configuration
if node == 'SERV3-MS-SYMME':
abqlauncher = '"C:/Program Files (x86)/SIMULIA/Abaqus/6.11-2/exec/abq6112.exe"' # Local machine configuration
if node == 'epua-pd45':
abqlauncher = 'C:\SIMULIA/Abaqus/Commands/abaqus'
def read_file(file_name):
'''
Read a two-column data file (displacement, force) and convert it to numbers
'''
f = open(file_name, 'r') # Opening the file
lignes = f.readlines() # Reads all lines one by one and stores them in a list
f.close() # Closing the file
# lignes.pop(0) # Delete the line break for each line
force_exp, disp_exp = [],[]
for ligne in lignes:
data = ligne.split() # Lines are splitted
disp_exp.append(float(data[0]))
force_exp.append(float(data[1]))
return -np.array(disp_exp), -np.array(force_exp)
class Simulation(object):
def __init__(self, sy, n, settings):
self.sy = sy
self.n = n
self.settings = settings
def Run(self):
"""
Runs a simulation for a given pair (sy, n) and returns the (disp, force) pair.
"""
#MODEL DEFINITION
sy = self.sy
n = self.n
E = self.settings['E']
nu = self.settings['nu']
inner_radius = self.settings['inner_radius']
outer_radius = self.settings['outer_radius']
disp = self.settings['displacement']/2.
nFrames = self.settings['nFrames']
Nr = self.settings['Nr']
Nt = self.settings['Nt']
Na = self.settings['Na']
Ne = self.settings['Ne']
thickness = self.settings['thickness']
print E, nu, sy, n
material = Hollomon(
labels = "SAMPLE_MAT",
E = E, nu = nu,
sy = sy, n = n)
m = RingCompression( material = material ,
inner_radius = inner_radius,
outer_radius = outer_radius,
disp = disp,
thickness = thickness,
nFrames = nFrames,
Nr = Nr,
Nt = Nt,
Na = Na,
workdir = workdir,
label = label,
elType = elType,
abqlauncher = abqlauncher,
cpus = cpus,
is_3D = True)
# SIMULATION
m.MakeMesh()
m.MakeInp()
m.Run()
m.PostProc()
outputs = m.outputs
force = -2. * outputs['history']['force']
disp = -2. * outputs['history']['disp']
self.disp = disp
self.force = force
def Interp(self):
"""
Interpolate the force-displacement curve on a known grid
"""
disp, force = self.disp, self.force
f = interpolate.interp1d(disp.data[0], force.data[0])
return f
class Opti(object):
def __init__(self, sy0, n0, settings):
self.sy0 = sy0
self.n0 = n0
self.settings = settings
self.sy = []
self.n = []
self.err = []
self.force_sim = []
disp_exp, force_exp = read_file(self.settings['file_name'])
g = interpolate.interp1d(disp_exp, force_exp)
self.disp_exp = disp_exp
self.force_exp = force_exp
d = self.settings['displacement']
self.disp_grid = np.linspace(0., d, 1000)
self.force_exp_grid= g(self.disp_grid)
def Err(self, param):
"""
Compute the residual error between the experimental and simulated curves
"""
sy = param[0]
n =param[1]
disp_grid = self.disp_grid
s = Simulation(sy, n ,self.settings)
s.Run()
f = s.Interp()
force_sim = f(disp_grid)
force_exp_grid = self.force_exp_grid
err = np.sqrt(((force_exp_grid - force_sim)**2).sum())
self.sy.append(sy)
self.n.append(n)
self.err.append(err)
self.force_sim.append(force_sim)
return err
def Optimize(self):
p0 = [self.sy0, self.n0]
result = minimize(self.Err, p0, method='nelder-mead', options={'disp':True, 'maxiter':settings['iteration']})
self.result = result
O = Opti(150., 0.1, settings)
O.Optimize()
fig = plt.figure('Load vs. disp')
plt.clf()
plt.plot(O.disp_grid, O.force_exp_grid, 'k-', label = 'experimental curve', linewidth = 2.)
plt.plot(O.disp_grid, O.force_sim[0], 'g-', label = 'initial curve', linewidth = 2.)
a = O.err
index = np.argmin(a)
plt.plot(O.disp_grid, O.force_sim[index], 'r-', label = 'optimized curve', linewidth = 2.)
for i in range(1, settings['iteration']):
plt.plot(O.disp_grid, O.force_sim[i], 'b-', linewidth = .2)
#plt.plot(disp.data[1], force.data[1], 'b-', label = 'Unloading', linewidth = 2.)
plt.legend(loc="lower right")
plt.grid()
plt.xlabel('Displacement, $U$')
plt.ylabel('Force, $F$')
plt.savefig(workdir + label + '_load-vs-disp.pdf')
| gpl-2.0 |
0asa/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
mengyun1993/RNN-binary | rnn15-new.py | 1 | 30238 | """ Vanilla RNN
@author Graham Taylor
"""
import numpy as np
import theano
import theano.tensor as T
from sklearn.base import BaseEstimator
import logging
import time
import os
import datetime
import pickle as pickle
import math
import matplotlib.pyplot as plt
plt.ion()
mode = theano.Mode(linker='cvm')
#mode = 'DEBUG_MODE'
class RNN(object):
""" Recurrent neural network class
Supported output types:
real : linear output units, use mean-squared error
binary : binary output units, use cross-entropy error
softmax : single softmax out, use cross-entropy error
"""
def __init__(self, input, n_in, n_hidden, n_out, activation=T.tanh,
output_type='real', use_symbolic_softmax=False):
self.input = input
self.activation = activation
self.output_type = output_type
# when using HF, SoftmaxGrad.grad is not implemented
# use a symbolic softmax which is slightly slower than T.nnet.softmax
# See: http://groups.google.com/group/theano-dev/browse_thread/
# thread/3930bd5a6a67d27a
if use_symbolic_softmax:
def symbolic_softmax(x):
e = T.exp(x)
return e / T.sum(e, axis=1).dimshuffle(0, 'x')
self.softmax = symbolic_softmax
else:
self.softmax = T.nnet.softmax
# recurrent weights as a shared variable
W_init = np.asarray(np.random.uniform(size=(n_hidden, n_hidden),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W = theano.shared(value=W_init, name='W')
# input to hidden layer weights
W_in_init = np.asarray(np.random.uniform(size=(n_in, n_hidden),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W_in = theano.shared(value=W_in_init, name='W_in')
# hidden to output layer weights
W_out_init = np.asarray(np.random.uniform(size=(n_hidden, n_out),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W_out = theano.shared(value=W_out_init, name='W_out')
h0_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
self.h0 = theano.shared(value=h0_init, name='h0')
bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
self.bh = theano.shared(value=bh_init, name='bh')
by_init = np.zeros((n_out,), dtype=theano.config.floatX)
self.by = theano.shared(value=by_init, name='by')
self.params = [self.W, self.W_in, self.W_out, self.h0,
self.bh, self.by]
# for every parameter, we maintain its last update
# the idea here is to use "momentum"
# keep moving mostly in the same direction
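# Concretely, in fit() each parameter is moved by
#   upd = mom * previous_update - learning_rate * gradient
# so a fraction `mom` of the last step is carried over into the new one.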
self.updates = {}
for param in self.params:
init = np.zeros(param.get_value(borrow=True).shape,
dtype=theano.config.floatX)
self.updates[param] = theano.shared(init)
# recurrent function (using tanh activation function) and linear output
# activation function
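# The recurrence implemented by `step` is
#   h_t = activation(x_t . W_in + h_{t-1} . W + bh)
#   y_t = h_t . W_out + by
# where the first scan dimension is time.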
def step(x_t, h_tm1):
h_t = self.activation(T.dot(x_t, self.W_in) + \
T.dot(h_tm1, self.W) + self.bh)
y_t = T.dot(h_t, self.W_out) + self.by
return h_t, y_t
# the hidden state `h` for the entire sequence, and the output for the
# entire sequence `y` (first dimension is always time)
[self.h, self.y_pred], _ = theano.scan(step,
sequences=self.input,
outputs_info=[self.h0, None])
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = 0
self.L1 += abs(self.W.sum())
self.L1 += abs(self.W_in.sum())
self.L1 += abs(self.W_out.sum())
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = 0
self.L2_sqr += (self.W ** 2).sum()
self.L2_sqr += (self.W_in ** 2).sum()
self.L2_sqr += (self.W_out ** 2).sum()
if self.output_type == 'real':
self.loss = lambda y: self.mse(y)
elif self.output_type == 'binary':
# push through sigmoid
self.p_y_given_x = T.nnet.sigmoid(self.y_pred) # apply sigmoid
self.y_out = T.round(self.p_y_given_x) # round to {0,1}
self.loss = lambda y: self.nll_binary(y)
elif self.output_type == 'softmax':
# push through softmax, computing vector of class-membership
# probabilities in symbolic form
self.p_y_given_x = self.softmax(self.y_pred)
# compute prediction as class whose probability is maximal
self.y_out = T.argmax(self.p_y_given_x, axis=-1)
self.loss = lambda y: self.nll_multiclass(y)
else:
raise NotImplementedError
def mse(self, y):
# error between output and target
return T.mean((self.y_pred - y) ** 2)
def nll_binary(self, y):
# negative log likelihood based on binary cross entropy error
return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))
def nll_multiclass(self, y):
# negative log likelihood based on multiclass cross entropy error
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of time steps (call it T) in the sequence
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the sequence
over the total number of examples in the sequence ; zero one
loss over the size of the sequence
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_out.ndim:
raise TypeError('y should have the same shape as self.y_out',
('y', y.type, 'y_out', self.y_out.type))
if self.output_type in ('binary', 'softmax'):
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_out, y))
else:
raise NotImplementedError()
class MetaRNN(BaseEstimator):
def __init__(self, n_in=5, n_hidden=50, n_out=5, learning_rate=0.01,
n_epochs=100, L1_reg=0.00, L2_reg=0.00005, learning_rate_decay=1,
activation='tanh', output_type='real',
final_momentum=0.9, initial_momentum=0.5,
momentum_switchover=5,
use_symbolic_softmax=False):
self.n_in = int(n_in)
self.n_hidden = int(n_hidden)
self.n_out = int(n_out)
self.learning_rate = float(learning_rate)
self.learning_rate_decay = float(learning_rate_decay)
self.n_epochs = int(n_epochs)
self.L1_reg = float(L1_reg)
self.L2_reg = float(L2_reg)
self.activation = activation
self.output_type = output_type
self.initial_momentum = float(initial_momentum)
self.final_momentum = float(final_momentum)
self.momentum_switchover = int(momentum_switchover)
self.use_symbolic_softmax = use_symbolic_softmax
self.ready()
def ready(self):
# input (where first dimension is time)
self.x = T.matrix()
# target (where first dimension is time)
if self.output_type == 'real':
self.y = T.matrix(name='y', dtype=theano.config.floatX)
elif self.output_type == 'binary':
self.y = T.matrix(name='y', dtype='int32')
elif self.output_type == 'softmax': # only vector labels supported
self.y = T.vector(name='y', dtype='int32')
else:
raise NotImplementedError
# initial hidden state of the RNN
self.h0 = T.vector()
# learning rate
self.lr = T.scalar()
if self.activation == 'tanh':
activation = T.tanh
elif self.activation == 'sigmoid':
activation = T.nnet.sigmoid
elif self.activation == 'relu':
activation = lambda x: x * (x > 0)
elif self.activation == 'cappedrelu':
activation = lambda x: T.minimum(x * (x > 0), 6)
else:
raise NotImplementedError
self.rnn = RNN(input=self.x, n_in=self.n_in,
n_hidden=self.n_hidden, n_out=self.n_out,
activation=activation, output_type=self.output_type,
use_symbolic_softmax=self.use_symbolic_softmax)
if self.output_type == 'real':
self.predict = theano.function(inputs=[self.x, ],
outputs=self.rnn.y_pred,
mode=mode)
elif self.output_type == 'binary':
self.predict_proba = theano.function(inputs=[self.x, ],
outputs=self.rnn.p_y_given_x, mode=mode)
self.predict = theano.function(inputs=[self.x, ],
outputs=T.round(self.rnn.p_y_given_x),
mode=mode)
elif self.output_type == 'softmax':
self.predict_proba = theano.function(inputs=[self.x, ],
outputs=self.rnn.p_y_given_x, mode=mode)
self.predict = theano.function(inputs=[self.x, ],
outputs=self.rnn.y_out, mode=mode)
else:
raise NotImplementedError
def shared_dataset(self, data_xy):
""" Load the dataset into shared variables """
data_x, data_y = data_xy
shared_x = theano.shared(np.asarray(data_x,
dtype=theano.config.floatX))
shared_y = theano.shared(np.asarray(data_y,
dtype=theano.config.floatX))
if self.output_type in ('binary', 'softmax'):
return shared_x, T.cast(shared_y, 'int32')
else:
return shared_x, shared_y
def __getstate__(self):
""" Return state sequence."""
params = self._get_params() # parameters set in constructor
weights = [p.get_value() for p in self.rnn.params]
state = (params, weights)
return state
def _set_weights(self, weights):
""" Set fittable parameters from weights sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
i = iter(weights)
for param in self.rnn.params:
param.set_value(i.next())
def __setstate__(self, state):
""" Set parameters from state sequence.
Parameters must be in the order defined by self.params:
W, W_in, W_out, h0, bh, by
"""
params, weights = state
self.set_params(**params)
self.ready()
self._set_weights(weights)
def save(self, fpath='.', fname=None):
""" Save a pickled representation of Model state. """
fpathstart, fpathext = os.path.splitext(fpath)
if fpathext == '.pkl':
# User supplied an absolute path to a pickle file
fpath, fname = os.path.split(fpath)
elif fname is None:
# Generate filename based on date
date_obj = datetime.datetime.now()
date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
class_name = self.__class__.__name__
fname = '%s.%s.pkl' % (class_name, date_str)
fabspath = os.path.join(fpath, fname)
logging.info("Saving to %s ..." % fabspath)
file = open(fabspath, 'wb')
state = self.__getstate__()
pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)
file.close()
def load(self, path):
""" Load model parameters from path. """
logging.info("Loading from %s ..." % path)
file = open(path, 'rb')
state = pickle.load(file)
self.__setstate__(state)
file.close()
def fit(self, X_train, Y_train, X_test=None, Y_test=None,
validation_frequency=100):
""" Fit model
Pass in X_test, Y_test to compute test error and report during
training.
X_train : ndarray (n_seq x n_steps x n_in)
Y_train : ndarray (n_seq x n_steps x n_out)
validation_frequency : int
in terms of number of sequences (or number of weight updates)
"""
f = file('../RNN-data/trainProcess/trainOutput-b15-2220-720-60.txt','a+')
if X_test is not None:
assert(Y_test is not None)
self.interactive = True
test_set_x, test_set_y = self.shared_dataset((X_test, Y_test))
else:
self.interactive = False
train_set_x, train_set_y = self.shared_dataset((X_train, Y_train))
n_train = train_set_x.get_value(borrow=True).shape[0]
if self.interactive:
n_test = test_set_x.get_value(borrow=True).shape[0]
######################
# BUILD ACTUAL MODEL #
######################
logging.info('... building the model')
index = T.lscalar('index') # index to a case
# learning rate (may change)
l_r = T.scalar('l_r', dtype=theano.config.floatX)
mom = T.scalar('mom', dtype=theano.config.floatX) # momentum
cost = self.rnn.loss(self.y) \
+ self.L1_reg * self.rnn.L1 \
+ self.L2_reg * self.rnn.L2_sqr
compute_train_error = theano.function(inputs=[index, ],
outputs=self.rnn.loss(self.y),
givens={
self.x: train_set_x[index],
self.y: train_set_y[index]},
mode=mode)
if self.interactive:
compute_test_error = theano.function(inputs=[index, ],
outputs=self.rnn.loss(self.y),
givens={
self.x: test_set_x[index],
self.y: test_set_y[index]},
mode=mode)
# compute the gradient of cost with respect to theta = (W, W_in, W_out)
# gradients on the weights using BPTT
gparams = []
for param in self.rnn.params:
gparam = T.grad(cost, param)
gparams.append(gparam)
updates = {}
for param, gparam in zip(self.rnn.params, gparams):
weight_update = self.rnn.updates[param]
upd = mom * weight_update - l_r * gparam
updates[weight_update] = upd
updates[param] = param + upd
# compiling a Theano function `train_model` that returns the
# cost, but in the same time updates the parameter of the
# model based on the rules defined in `updates`
train_model = theano.function(inputs=[index, l_r, mom],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[index],
self.y: train_set_y[index]},
mode=mode)
###############
# TRAIN MODEL #
###############
logging.info('... training')
epoch = 0
while (epoch < self.n_epochs):
epoch = epoch + 1
for idx in xrange(n_train):
effective_momentum = self.final_momentum \
if epoch > self.momentum_switchover \
else self.initial_momentum
example_cost = train_model(idx, self.learning_rate,
effective_momentum)
# iteration number (how many weight updates have we made?)
# epoch is 1-based, index is 0 based
iter = (epoch - 1) * n_train + idx + 1
if iter % validation_frequency == 0:
# compute loss on training set
train_losses = [compute_train_error(i)
for i in xrange(n_train)]
this_train_loss = np.mean(train_losses)
if self.interactive:
test_losses = [compute_test_error(i)
for i in xrange(n_test)]
this_test_loss = np.mean(test_losses)
f.write('epoch %i, seq %i/%i, tr loss %f '
'te loss %f lr: %f \n' % \
(epoch, idx + 1, n_train,
this_train_loss, this_test_loss, self.learning_rate))
print('epoch %i, seq %i/%i, tr loss %f '
'te loss %f lr: %f' % \
(epoch, idx + 1, n_train,
this_train_loss, this_test_loss, self.learning_rate))
else:
f.write('epoch %i, seq %i/%i, train loss %f '
'lr: %f \n' % \
(epoch, idx + 1, n_train, this_train_loss,
self.learning_rate))
print('epoch %i, seq %i/%i, train loss %f '
'lr: %f' % \
(epoch, idx + 1, n_train, this_train_loss,
self.learning_rate))
self.learning_rate *= self.learning_rate_decay
f.close()
def test_real():
""" Test RNN with real-valued outputs. """
n_hidden = 200
n_in = 20
n_out = 5
n_steps = 10
n_seq = 100
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps, n_out))
targets[:, 1:, 0] = seq[:, :-1, 3] # delayed 1
targets[:, 1:, 1] = seq[:, :-1, 2] # delayed 1
targets[:, 2:, 2] = seq[:, :-2, 0] # delayed 2
targets += 0.01 * np.random.standard_normal(targets.shape)
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.001, learning_rate_decay=0.999,
n_epochs=400, activation='tanh')
model.fit(seq, targets, validation_frequency=1000)
[seqNum,lineNum,colNum] = targets.shape
print(seqNum,lineNum,colNum)
error = [0 for i in range(colNum)]
plt.close('all')
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[0])
ax1.set_title('input')
ax2 = plt.subplot(212)
true_targets = plt.plot(targets[0])
guess = model.predict(seq[0])
guessed_targets = plt.plot(guess, linestyle='--')
for i, x in enumerate(guessed_targets):
x.set_color(true_targets[i].get_color())
ax2.set_title('solid: true output, dashed: model output')
dif = abs(guess - targets[0])
[linedif,coldif] = dif.shape
print(linedif,coldif)
errorsum = 0
for i in range (colNum):
sum = 0
for j in range (lineNum):
sum += dif[j][i] ** 2
error[i] = math.sqrt(sum/lineNum)
errorsum += error[i]
print(error[i])
print("average error = ", errorsum/colNum)
def test_binary(multiple_out=False, n_epochs=250):
""" Test RNN with binary outputs. """
n_hidden = 300
n_in = 36
n_out = 449
n_steps = 60
n_seq = 2220
np.random.seed(0)
# simple lag test
seqlist = []
count = 0
data = []
BASE_DIR = os.path.dirname(__file__)
file_path1 = os.path.join(BASE_DIR,"../RNN-data/traindata/inputdata-b15-60-60-38-n.txt")
for l in open(file_path1):
#for l in open("inputdata-b02-300-10.txt"):
count += 1
row = [int(x) for x in l.split()]
if len(row) > 0:
data.append(row)
if (count == n_steps):
count = 0
if len(data) >0:
seqlist.append(data)
data = []
seqarray = np.asarray(seqlist)
seq = seqarray[:,:,:n_in]
targets = seqarray[:,:,n_in:]
seqlistTest1 = []
count = 0
dataTest1 = []
file_path2 = os.path.join(BASE_DIR, '../RNN-data/testdata/inputdata-b15-60-60-12.txt')
#file_path2 = os.path.join(BASE_DIR, '../RNN-data/testdata/inputerror-b15-60-60-12-y.txt')
for l in open(file_path2):
#for l in open("inputdata-b02-100-10.txt"):
count += 1
row = [int(x) for x in l.split()]
if len(row) > 0:
dataTest1.append(row)
if (count == n_steps):
count = 0
if len(dataTest1) >0:
seqlistTest1.append(dataTest1)
dataTest1 = []
seqarrayTest1 = np.asarray(seqlistTest1)
seqTest1 = seqarrayTest1[:,:,:n_in]
targetsTest1 = seqarrayTest1[:,:,n_in:]
############## Add another Test ####################
seqlistTest2 = []
count = 0
dataTest2 = []
#file_path2 = os.path.join(BASE_DIR, '../RNN-data/testdata/inputdata-b15-60-60-12.txt')
file_path4 = os.path.join(BASE_DIR, '../RNN-data/testdata/inputerror-b15-60-60-12-y.txt')
for l in open(file_path4):
#for l in open("inputdata-b02-100-10.txt"):
count += 1
row = [int(x) for x in l.split()]
if len(row) > 0:
dataTest2.append(row)
if (count == n_steps):
count = 0
if len(dataTest2) >0:
seqlistTest2.append(dataTest2)
dataTest2 = []
seqarrayTest2 = np.asarray(seqlistTest2)
seqTest2 = seqarrayTest2[:,:,:n_in]
targetsTest2 = seqarrayTest2[:,:,n_in:]
########### End add another Test ##############
######## Calculate change Frequency for each FF ##############
seqlistError = []
count = 0
dataError = []
file_path3 = os.path.join(BASE_DIR, '../RNN-data/traindata/inputerror-b15-60-60-38-n.txt')
for l in open(file_path3):
count += 1
row = [int(x) for x in l.split()]
if len(row) > 0:
dataError.append(row)
if (count == n_steps):
count = 0
if len(dataError) >0:
seqlistError.append(dataError)
dataError = []
seqarrayError = np.asarray(seqlistError)
targetsError = seqarrayError[:,:,n_in:]
[seqNum, lineNum, colNum] = targetsTest1.shape
freqArray = [None] * lineNum
for i in range (lineNum):
freqArray[i] = [0]*colNum
freqArrayNP = np.asarray(freqArray)
for i in range(seqNum):
freqArrayNP = freqArrayNP +abs(targets[i] - targetsError[i])
fmatrix = file('../RNN-data/matrix/freqMatrix-b15.txt','a+')
for i in range (lineNum):
for j in range(colNum):
fmatrix.write(str(freqArrayNP[i,j]))
fmatrix.write("\n")
fmatrix.close()
######### End Frequency Calculation #########################
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.11, learning_rate_decay=1.005,
n_epochs=n_epochs, activation='tanh', output_type='binary')
#model.fit(seq, targets, validation_frequency=1000)
model.fit(seq, targets, seqTest1, targetsTest1, validation_frequency=1000)
ferror1 = file('errorRate/errorRate-b15-no.txt','a+')
ferror2 = file('errorRate/errorRate-b15-single.txt','a+')
[seqNum,lineNum,colNum] = targetsTest1.shape
seqs = xrange(seqNum)
error = [0 for i in range(seqNum)]
errorsum = 0
for k in seqs:
guess1 = model.predict_proba(seqTest1[k])
dif1 = abs(guess1 - targetsTest1[k])
[lineDif,colDif] = dif1.shape
for i in range (1,lineDif):
for j in range (colDif):
if (dif1[i][j] > 0.5):
ferror1.write("1 ")
else:
ferror1.write("0 ")
ferror1.write("\n")
ferror1.close()
for k in seqs:
guess2 = model.predict_proba(seqTest2[k])
dif2 = abs(guess2 - targetsTest2[k])
[lineDif,colDif] = dif2.shape
for i in range (1,lineDif):
for j in range (colDif):
if (dif2[i][j] > 0.5):
ferror2.write("1 ")
else:
ferror2.write("0 ")
ferror2.write("\n")
ferror2.close()
## #print (seqTest.shape)
## seqs = xrange(seqNum)
## error = [0 for i in range(lineNum*seqNum)]
## errorsum = 0
## for k in seqs:
## guess = model.predict_proba(seqTest[k])
## dif = abs(guess - targetsTest[k])
## [lineDif,colDif] = dif.shape
## #print(lineDif,colDif)
## for i in range (lineDif):
## ki = k*lineDif+i
## for j in range (colDif):
## if (dif[i][j] > 0.5):
## error[ki] += 1
## ferror.write('error %d = %d \n' % (ki,error[ki]))
## if (error[ki]>0):
## errorsum += 1
## print(errorsum)
## errorRate = errorsum/1.0/seqNum/lineNum
## ferror.write("average error = %f \n" % (errorRate))
##
## seqs = xrange(1)
##
## [seqNum,lineNum,colNum] = targets.shape
## print(seqNum,lineNum,colNum)
## error = [0 for i in range(colNum)]
##
## plt.close('all')
## for seq_num in seqs:
## fig = plt.figure()
## ax1 = plt.subplot(211)
## plt.plot(seq[seq_num])
## ax1.set_title('input')
## ax2 = plt.subplot(212)
## true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
##
## guess = model.predict_proba(seq[seq_num])
## guessed_targets = plt.step(xrange(n_steps), guess)
## plt.setp(guessed_targets, linestyle='--', marker='d')
## for i, x in enumerate(guessed_targets):
## x.set_color(true_targets[i].get_color())
## ax2.set_ylim((-0.1, 1.1))
## ax2.set_title('solid: true output, dashed: model output (prob)')
##
##
## dif = abs(guess - targets[seq_num])
## [lineDif,colDif] = dif.shape
## print(lineDif,colDif)
## errorsum = 0
## for i in range (colNum):
## for j in range (lineNum):
## if (dif[j][i] > 0.5):
## error[i] += 1
## print(error[i])
## errorsum += error[i]
## print("average error = ", errorsum/colNum)
def test_softmax(n_epochs=250):
""" Test RNN with softmax outputs. """
n_hidden = 10
n_in = 5
n_steps = 10
n_seq = 100
n_classes = 3
n_out = n_classes # restricted to single softmax per time step
np.random.seed(0)
# simple lag test
seq = np.random.randn(n_seq, n_steps, n_in)
targets = np.zeros((n_seq, n_steps), dtype=np.int)
thresh = 0.5
# if lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
# class 1
# if lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
# class 2
# if lag 2(dim0) - thresh <= lag 1 (dim 3) <= lag2(dim0) + thresh
# class 0
targets[:, 2:][seq[:, 1:-1, 3] > seq[:, :-2, 0] + thresh] = 1
targets[:, 2:][seq[:, 1:-1, 3] < seq[:, :-2, 0] - thresh] = 2
#targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
learning_rate=0.001, learning_rate_decay=0.999,
n_epochs=n_epochs, activation='tanh',
output_type='softmax', use_symbolic_softmax=False)
model.fit(seq, targets, validation_frequency=1000)
seqs = xrange(10)
[seqNum,lineNum,colNum] = seq.shape
print(seqNum,lineNum,colNum)
error = [0 for i in range(colNum)]
plt.close('all')
for seq_num in seqs:
fig = plt.figure()
ax1 = plt.subplot(211)
plt.plot(seq[seq_num])
        ax1.set_title('input')
ax2 = plt.subplot(212)
# blue line will represent true classes
true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
# show probabilities (in b/w) output by model
guess = model.predict_proba(seq[seq_num])
guessed_probs = plt.imshow(guess.T, interpolation='nearest',
cmap='gray')
ax2.set_title('blue: true class, grayscale: probs assigned by model')
dif = abs(seq[seq_num] - targets[seq_num])
for i in range (colNum):
sum = 0
for j in range (lineNum):
sum += dif[i,j] ** 2
error[i] = math.sqrt(sum/lineNum)
print(error[i])
if __name__ == "__main__":
##logging.basicConfig(
## level = logging.INFO,
## format = 'LINE %(lineno)-4d %(levelname)-8s %(message)s',
## datafmt = '%m-%d %H:%M',
## filename = "D:/logresult20160123/one.log",
## filemode = 'w')
t0 = time.time()
#test_real()
# problem takes more epochs to solve
test_binary(multiple_out=True, n_epochs=20)
#test_softmax(n_epochs=250)
print ("Elapsed time: %f" % (time.time() - t0))
| bsd-3-clause |
flightgong/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 5 | 10504 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
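# For reference, each line of an svmlight/libsvm file has the form
# "<label> [qid:<id>] <index>:<value> <index>:<value> ...",
# e.g. "3 qid:1 1:0.53 2:0.12" (see test_load_with_qid below).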
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
shutil.copyfileobj(open(datafile, "rb"), gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
shutil.copyfileobj(open(datafile, "rb"), BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
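# Note on the shapes asserted above: loaded on its own, data1's smallest feature
# index is 1, so zero_based="auto" treats it as one-based and shifts the indices
# down, giving 3 features; loaded together with data2, whose explicit index 0
# forces a zero-based interpretation for the whole group, the maximum index 3
# implies 4 features.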
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
rlabbe/filterpy | filterpy/stats/stats.py | 1 | 32181 | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-arguments, bad-whitespace
# pylint: disable=too-many-lines, too-many-locals, len-as-condition
# pylint: disable=import-outside-toplevel
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import math
from math import cos, sin
import random
import warnings
import numpy as np
from numpy.linalg import inv
import scipy.linalg as linalg
import scipy.sparse as sp
import scipy.sparse.linalg as spln
from scipy.stats import norm, multivariate_normal
# Older versions of scipy do not support the allow_singular keyword. I could
# check the version number explicitly, but perhaps this is clearer
_support_singular = True
try:
multivariate_normal.logpdf(1, 1, 1, allow_singular=True)
except TypeError:
warnings.warn(
'You are using a version of SciPy that does not support the '\
'allow_singular parameter in scipy.stats.multivariate_normal.logpdf(). '\
'Future versions of FilterPy will require a version of SciPy that '\
'implements this keyword',
DeprecationWarning)
_support_singular = False
def _validate_vector(u, dtype=None):
# this is taken from scipy.spatial.distance. Internal function, so
# redefining here.
u = np.asarray(u, dtype=dtype).squeeze()
# Ensure values such as u=1 and u=[1] still return 1-D arrays.
u = np.atleast_1d(u)
if u.ndim > 1:
raise ValueError("Input vector should be 1-D.")
return u
def mahalanobis(x, mean, cov):
"""
Computes the Mahalanobis distance between the state vector x from the
Gaussian `mean` with covariance `cov`. This can be thought as the number
of standard deviations x is from the mean, i.e. a return value of 3 means
x is 3 std from mean.
Parameters
----------
x : (N,) array_like, or float
Input state vector
mean : (N,) array_like, or float
mean of multivariate Gaussian
cov : (N, N) array_like or float
covariance of the multivariate Gaussian
Returns
-------
mahalanobis : double
The Mahalanobis distance between vectors `x` and `mean`
Examples
--------
>>> mahalanobis(x=3., mean=3.5, cov=4.**2) # univariate case
0.125
>>> mahalanobis(x=3., mean=6, cov=1) # univariate, 3 std away
3.0
>>> mahalanobis([1., 2], [1.1, 3.5], [[1., .1],[.1, 13]])
0.42533327058913922
"""
x = _validate_vector(x)
mean = _validate_vector(mean)
if x.shape != mean.shape:
raise ValueError("length of input vectors must be the same")
y = x - mean
S = np.atleast_2d(cov)
dist = float(np.dot(np.dot(y.T, inv(S)), y))
return math.sqrt(dist)
def log_likelihood(z, x, P, H, R):
"""
Returns log-likelihood of the measurement z given the Gaussian
posterior (x, P) using measurement function H and measurement
covariance error R
"""
S = np.dot(H, np.dot(P, H.T)) + R
return logpdf(z, np.dot(H, x), S)
def likelihood(z, x, P, H, R):
"""
Returns likelihood of the measurement z given the Gaussian
posterior (x, P) using measurement function H and measurement
covariance error R
"""
return np.exp(log_likelihood(z, x, P, H, R))
def logpdf(x, mean=None, cov=1, allow_singular=True):
"""
Computes the log of the probability density function of the normal
N(mean, cov) for the data x. The normal may be univariate or multivariate.
Wrapper for older versions of scipy.multivariate_normal.logpdf which
    don't support the allow_singular keyword prior to version 0.15.0.
If it is not supported, and cov is singular or not PSD you may get
an exception.
`x` and `mean` may be column vectors, row vectors, or lists.
"""
if mean is not None:
flat_mean = np.asarray(mean).flatten()
else:
flat_mean = None
flat_x = np.asarray(x).flatten()
if _support_singular:
return multivariate_normal.logpdf(flat_x, flat_mean, cov, allow_singular)
return multivariate_normal.logpdf(flat_x, flat_mean, cov)
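# Quick sanity check of logpdf (illustrative): for a univariate standard normal
# evaluated at its mean, logpdf(1., 1., 1.) equals -0.5*math.log(2*math.pi),
# i.e. roughly -0.9189385.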
def gaussian(x, mean, var, normed=True):
"""
returns probability density function (pdf) for x given a Gaussian with the
specified mean and variance. All must be scalars.
gaussian (1,2,3) is equivalent to scipy.stats.norm(2, math.sqrt(3)).pdf(1)
It is quite a bit faster albeit much less flexible than the latter.
Parameters
----------
x : scalar or array-like
The value(s) for which we compute the distribution
mean : scalar
Mean of the Gaussian
var : scalar
Variance of the Gaussian
normed : bool, default True
Normalize the output if the input is an array of values.
Returns
-------
pdf : float
probability distribution of x for the Gaussian (mean, var). E.g. 0.101 denotes
10.1%.
Examples
--------
>>> gaussian(8, 1, 2)
1.3498566943461957e-06
>>> gaussian([8, 7, 9], 1, 2)
array([1.34985669e-06, 3.48132630e-05, 3.17455867e-08])
"""
pdf = ((2*math.pi*var)**-.5) * np.exp((-0.5*(np.asarray(x)-mean)**2.) / var)
if normed and len(np.shape(pdf)) > 0:
pdf = pdf / sum(pdf)
return pdf
def mul(mean1, var1, mean2, var2):
"""
Multiply Gaussian (mean1, var1) with (mean2, var2) and return the
results as a tuple (mean, var).
Strictly speaking the product of two Gaussian PDFs is a Gaussian
function, not Gaussian PDF. It is, however, proportional to a Gaussian
PDF, so it is safe to treat the output as a PDF for any filter using
Bayes equation, which normalizes the result anyway.
Parameters
----------
mean1 : scalar
mean of first Gaussian
var1 : scalar
variance of first Gaussian
mean2 : scalar
mean of second Gaussian
var2 : scalar
variance of second Gaussian
Returns
-------
mean : scalar
mean of product
var : scalar
variance of product
Examples
--------
>>> mul(1, 2, 3, 4)
(1.6666666666666667, 1.3333333333333333)
References
----------
Bromily. "Products and Convolutions of Gaussian Probability Functions",
Tina Memo No. 2003-003.
http://www.tina-vision.net/docs/memos/2003-003.pdf
"""
mean = (var1*mean2 + var2*mean1) / (var1 + var2)
var = 1 / (1/var1 + 1/var2)
return (mean, var)
def mul_pdf(mean1, var1, mean2, var2):
"""
Multiply Gaussian (mean1, var1) with (mean2, var2) and return the
results as a tuple (mean, var, scale_factor).
Strictly speaking the product of two Gaussian PDFs is a Gaussian
function, not Gaussian PDF. It is, however, proportional to a Gaussian
PDF. `scale_factor` provides this proportionality constant
Parameters
----------
mean1 : scalar
mean of first Gaussian
var1 : scalar
variance of first Gaussian
mean2 : scalar
mean of second Gaussian
var2 : scalar
variance of second Gaussian
Returns
-------
mean : scalar
mean of product
var : scalar
variance of product
scale_factor : scalar
proportionality constant
Examples
--------
    >>> mul_pdf(1, 2, 3, 4)[:2]
(1.6666666666666667, 1.3333333333333333)
References
----------
Bromily. "Products and Convolutions of Gaussian Probability Functions",
Tina Memo No. 2003-003.
http://www.tina-vision.net/docs/memos/2003-003.pdf
"""
mean = (var1*mean2 + var2*mean1) / (var1 + var2)
var = 1. / (1./var1 + 1./var2)
S = math.exp(-(mean1 - mean2)**2 / (2*(var1 + var2))) / \
math.sqrt(2 * math.pi * (var1 + var2))
return mean, var, S
def add(mean1, var1, mean2, var2):
"""
Add the Gaussians (mean1, var1) with (mean2, var2) and return the
results as a tuple (mean,var).
var1 and var2 are variances - sigma squared in the usual parlance.
"""
return (mean1+mean2, var1+var2)
def multivariate_gaussian(x, mu, cov):
"""
This is designed to replace scipy.stats.multivariate_normal
which is not available before version 0.14. You may either pass in a
multivariate set of data:
.. code-block:: Python
multivariate_gaussian (array([1,1]), array([3,4]), eye(2)*1.4)
multivariate_gaussian (array([1,1,1]), array([3,4,5]), 1.4)
or unidimensional data:
.. code-block:: Python
multivariate_gaussian(1, 3, 1.4)
In the multivariate case if cov is a scalar it is interpreted as eye(n)*cov
    The function gaussian() implements the 1D (univariate) case, and is much
faster than this function.
equivalent calls:
.. code-block:: Python
multivariate_gaussian(1, 2, 3)
scipy.stats.multivariate_normal(2,3).pdf(1)
Parameters
----------
x : float, or np.array-like
Value to compute the probability for. May be a scalar if univariate,
or any type that can be converted to an np.array (list, tuple, etc).
np.array is best for speed.
mu : float, or np.array-like
mean for the Gaussian . May be a scalar if univariate, or any type
that can be converted to an np.array (list, tuple, etc).np.array is
best for speed.
cov : float, or np.array-like
Covariance for the Gaussian . May be a scalar if univariate, or any
type that can be converted to an np.array (list, tuple, etc).np.array is
best for speed.
Returns
-------
probability : float
probability for x for the Gaussian (mu,cov)
"""
warnings.warn(
("This was implemented before SciPy version 0.14, which implemented "
"scipy.stats.multivariate_normal. This function will be removed in "
"a future release of FilterPy"), DeprecationWarning)
# force all to numpy.array type, and flatten in case they are vectors
x = np.array(x, copy=False, ndmin=1).flatten()
mu = np.array(mu, copy=False, ndmin=1).flatten()
nx = len(mu)
cov = _to_cov(cov, nx)
norm_coeff = nx*math.log(2*math.pi) + np.linalg.slogdet(cov)[1]
err = x - mu
if sp.issparse(cov):
numerator = spln.spsolve(cov, err).T.dot(err)
else:
numerator = np.linalg.solve(cov, err).T.dot(err)
return math.exp(-0.5*(norm_coeff + numerator))
def multivariate_multiply(m1, c1, m2, c2):
"""
Multiplies the two multivariate Gaussians together and returns the
results as the tuple (mean, covariance).
Examples
--------
.. code-block:: Python
m, c = multivariate_multiply([7.0, 2], [[1.0, 2.0], [2.0, 1.0]],
[3.2, 0], [[8.0, 1.1], [1.1,8.0]])
Parameters
----------
m1 : array-like
        Mean of first Gaussian. Must be convertible to a 1D array via
        numpy.asarray(). For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
        are all valid.
    c1 : matrix-like
        Covariance of first Gaussian. Must be convertible to a 2D array via
        numpy.asarray().
    m2 : array-like
        Mean of second Gaussian. Must be convertible to a 1D array via
        numpy.asarray(). For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
        are all valid.
    c2 : matrix-like
        Covariance of second Gaussian. Must be convertible to a 2D array via
numpy.asarray().
Returns
-------
m : ndarray
mean of the result
c : ndarray
covariance of the result
"""
C1 = np.asarray(c1)
C2 = np.asarray(c2)
M1 = np.asarray(m1)
M2 = np.asarray(m2)
sum_inv = np.linalg.inv(C1+C2)
C3 = np.dot(C1, sum_inv).dot(C2)
M3 = (np.dot(C2, sum_inv).dot(M1) +
np.dot(C1, sum_inv).dot(M2))
return M3, C3
def plot_discrete_cdf(xs, ys, ax=None, xlabel=None, ylabel=None,
label=None):
"""
    Plots the CDF of a discrete distribution given by the points in `xs` and `ys`.
    The x-axis contains the x values, the y-axis shows the cumulative probability.
Parameters
----------
xs : list-like of scalars
x values corresponding to the values in `y`s. Can be `None`, in which
case range(len(ys)) will be used.
ys : list-like of scalars
list of probabilities to be plotted which should sum to 1.
ax : matplotlib axes object, optional
If provided, the axes to draw on, otherwise plt.gca() is used.
xlim, ylim: (float,float), optional
specify the limits for the x or y axis as tuple (low,high).
If not specified, limits will be automatically chosen to be 'nice'
xlabel : str,optional
label for the x-axis
ylabel : str, optional
label for the y-axis
label : str, optional
label for the legend
Returns
-------
axis of plot
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
if xs is None:
xs = range(len(ys))
ys = np.cumsum(ys)
ax.plot(xs, ys, label=label)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def plot_gaussian_cdf(mean=0., variance=1.,
ax=None,
xlim=None, ylim=(0., 1.),
xlabel=None, ylabel=None,
label=None):
"""
Plots a normal distribution CDF with the given mean and variance.
x-axis contains the mean, the y-axis shows the cumulative probability.
Parameters
----------
mean : scalar, default 0.
mean for the normal distribution.
    variance : scalar, default 1.
variance for the normal distribution.
ax : matplotlib axes object, optional
If provided, the axes to draw on, otherwise plt.gca() is used.
xlim, ylim: (float,float), optional
specify the limits for the x or y axis as tuple (low,high).
If not specified, limits will be automatically chosen to be 'nice'
xlabel : str,optional
label for the x-axis
ylabel : str, optional
label for the y-axis
label : str, optional
label for the legend
Returns
-------
axis of plot
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
sigma = math.sqrt(variance)
n = norm(mean, sigma)
if xlim is None:
xlim = [n.ppf(0.001), n.ppf(0.999)]
xs = np.arange(xlim[0], xlim[1], (xlim[1] - xlim[0]) / 1000.)
cdf = n.cdf(xs)
ax.plot(xs, cdf, label=label)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
def plot_gaussian_pdf(mean=0.,
variance=1.,
std=None,
ax=None,
mean_line=False,
xlim=None, ylim=None,
xlabel=None, ylabel=None,
label=None):
"""
Plots a normal distribution PDF with the given mean and variance.
x-axis contains the mean, the y-axis shows the probability density.
Parameters
----------
mean : scalar, default 0.
mean for the normal distribution.
variance : scalar, default 1., optional
variance for the normal distribution.
std: scalar, default=None, optional
standard deviation of the normal distribution. Use instead of
`variance` if desired
ax : matplotlib axes object, optional
If provided, the axes to draw on, otherwise plt.gca() is used.
mean_line : boolean
draws a line at x=mean
xlim, ylim: (float,float), optional
specify the limits for the x or y axis as tuple (low,high).
If not specified, limits will be automatically chosen to be 'nice'
xlabel : str,optional
label for the x-axis
ylabel : str, optional
label for the y-axis
label : str, optional
label for the legend
Returns
-------
axis of plot
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
if variance is not None and std is not None:
raise ValueError('Specify only one of variance and std')
if variance is None and std is None:
raise ValueError('Specify variance or std')
if variance is not None:
std = math.sqrt(variance)
n = norm(mean, std)
if xlim is None:
xlim = [n.ppf(0.001), n.ppf(0.999)]
xs = np.arange(xlim[0], xlim[1], (xlim[1] - xlim[0]) / 1000.)
ax.plot(xs, n.pdf(xs), label=label)
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
if mean_line:
plt.axvline(mean)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
return ax
def plot_gaussian(mean=0., variance=1.,
ax=None,
mean_line=False,
xlim=None,
ylim=None,
xlabel=None,
ylabel=None,
label=None):
"""
DEPRECATED. Use plot_gaussian_pdf() instead. This is poorly named, as
there are multiple ways to plot a Gaussian.
"""
warnings.warn('This function is deprecated. It is poorly named. '\
'A Gaussian can be plotted as a PDF or CDF. This '\
'plots a PDF. Use plot_gaussian_pdf() instead,',
DeprecationWarning)
    return plot_gaussian_pdf(mean=mean, variance=variance, ax=ax,
                             mean_line=mean_line, xlim=xlim, ylim=ylim,
                             xlabel=xlabel, ylabel=ylabel, label=label)
def covariance_ellipse(P, deviations=1):
"""
Returns a tuple defining the ellipse representing the 2 dimensional
covariance matrix P.
Parameters
----------
P : nd.array shape (2,2)
covariance matrix
deviations : int (optional, default = 1)
# of standard deviations. Default is 1.
Returns (angle_radians, width_radius, height_radius)
"""
U, s, _ = linalg.svd(P)
orientation = math.atan2(U[1, 0], U[0, 0])
width = deviations * math.sqrt(s[0])
height = deviations * math.sqrt(s[1])
if height > width:
raise ValueError('width must be greater than height')
return (orientation, width, height)
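# Illustrative example: for an axis-aligned covariance np.diag([4., 1.]),
# covariance_ellipse(np.diag([4., 1.])) returns (0.0, 2.0, 1.0): no rotation,
# with semi-axes of 2 and 1 standard deviations along x and y.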
def _eigsorted(cov, asc=True):
"""
Computes eigenvalues and eigenvectors of a covariance matrix and returns
them sorted by eigenvalue.
Parameters
----------
cov : ndarray
covariance matrix
asc : bool, default=True
determines whether we are sorted smallest to largest (asc=True),
or largest to smallest (asc=False)
Returns
-------
eigval : 1D ndarray
eigenvalues of covariance ordered largest to smallest
eigvec : 2D ndarray
eigenvectors of covariance matrix ordered to match `eigval` ordering.
I.e eigvec[:, 0] is the rotation vector for eigval[0]
"""
eigval, eigvec = np.linalg.eigh(cov)
order = eigval.argsort()
if not asc:
# sort largest to smallest
order = order[::-1]
return eigval[order], eigvec[:, order]
def plot_3d_covariance(mean, cov, std=1.,
ax=None, title=None,
color=None, alpha=1.,
label_xyz=True,
N=60,
shade=True,
limit_xyz=True,
**kwargs):
"""
Plots a covariance matrix `cov` as a 3D ellipsoid centered around
the `mean`.
Parameters
----------
mean : 3-vector
mean in x, y, z. Can be any type convertable to a row vector.
cov : ndarray 3x3
covariance matrix
std : double, default=1
standard deviation of ellipsoid
ax : matplotlib.axes._subplots.Axes3DSubplot, optional
Axis to draw on. If not provided, a new 3d axis will be generated
for the current figure
title : str, optional
If provided, specifies the title for the plot
color : any value convertible to a color
if specified, color of the ellipsoid.
alpha : float, default 1.
        Alpha value of the ellipsoid. <1 makes it semi-transparent.
label_xyz: bool, default True
Gives labels 'X', 'Y', and 'Z' to the axis.
N : int, default=60
Number of segments to compute ellipsoid in u,v space. Large numbers
can take a very long time to plot. Default looks nice.
shade : bool, default=True
Use shading to draw the ellipse
limit_xyz : bool, default=True
Limit the axis range to fit the ellipse
**kwargs : optional
keyword arguments to supply to the call to plot_surface()
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# force mean to be a 1d vector no matter its shape when passed in
mean = np.atleast_2d(mean)
if mean.shape[1] == 1:
mean = mean.T
if not(mean.shape[0] == 1 and mean.shape[1] == 3):
raise ValueError('mean must be convertible to a 1x3 row vector')
mean = mean[0]
# force covariance to be 3x3 np.array
cov = np.asarray(cov)
if cov.shape[0] != 3 or cov.shape[1] != 3:
raise ValueError("covariance must be 3x3")
# The idea is simple - find the 3 axis of the covariance matrix
# by finding the eigenvalues and vectors. The eigenvalues are the
# radii (squared, since covariance has squared terms), and the
# eigenvectors give the rotation. So we make an ellipse with the
# given radii and then rotate it to the proper orientation.
eigval, eigvec = _eigsorted(cov, asc=True)
radii = std * np.sqrt(np.real(eigval))
if eigval[0] < 0:
raise ValueError("covariance matrix must be positive definite")
# calculate cartesian coordinates for the ellipsoid surface
u = np.linspace(0.0, 2.0 * np.pi, N)
v = np.linspace(0.0, np.pi, N)
x = np.outer(np.cos(u), np.sin(v)) * radii[0]
y = np.outer(np.sin(u), np.sin(v)) * radii[1]
z = np.outer(np.ones_like(u), np.cos(v)) * radii[2]
# rotate data with eigenvector and center on mu
a = np.kron(eigvec[:, 0], x)
b = np.kron(eigvec[:, 1], y)
c = np.kron(eigvec[:, 2], z)
data = a + b + c
N = data.shape[0]
x = data[:, 0:N] + mean[0]
y = data[:, N:N*2] + mean[1]
z = data[:, N*2:] + mean[2]
fig = plt.gcf()
if ax is None:
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x, y, z,
rstride=3, cstride=3, linewidth=0.1, alpha=alpha,
shade=shade, color=color, **kwargs)
# now make it pretty!
if label_xyz:
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if limit_xyz:
r = radii.max()
ax.set_xlim(-r + mean[0], r + mean[0])
ax.set_ylim(-r + mean[1], r + mean[1])
ax.set_zlim(-r + mean[2], r + mean[2])
if title is not None:
plt.title(title)
#pylint: disable=pointless-statement
Axes3D #kill pylint warning about unused import
return ax
def plot_covariance_ellipse(
mean, cov=None, variance=1.0, std=None,
ellipse=None, title=None, axis_equal=True, show_semiaxis=False,
facecolor=None, edgecolor=None,
fc='none', ec='#004080',
alpha=1.0, xlim=None, ylim=None,
ls='solid'):
"""
Deprecated function to plot a covariance ellipse. Use plot_covariance
instead.
See Also
--------
plot_covariance
"""
warnings.warn("deprecated, use plot_covariance instead", DeprecationWarning)
plot_covariance(mean=mean, cov=cov, variance=variance, std=std,
ellipse=ellipse, title=title, axis_equal=axis_equal,
show_semiaxis=show_semiaxis, facecolor=facecolor,
edgecolor=edgecolor, fc=fc, ec=ec, alpha=alpha,
xlim=xlim, ylim=ylim, ls=ls)
def _std_tuple_of(var=None, std=None, interval=None):
"""
    Convenience function for plotting. Given one of var, standard
deviation, or interval, return the std. Any of the three can be an
iterable list.
Examples
--------
    >>> _std_tuple_of(var=[1, 4, 9])
    array([1., 2., 3.])
"""
if std is not None:
if np.isscalar(std):
std = (std,)
return std
if interval is not None:
if np.isscalar(interval):
interval = (interval,)
return norm.interval(interval)[1]
if var is None:
raise ValueError("no inputs were provided")
if np.isscalar(var):
var = (var,)
return np.sqrt(var)
def plot_covariance(
mean, cov=None, variance=1.0, std=None, interval=None,
ellipse=None, title=None, axis_equal=True,
show_semiaxis=False, show_center=True,
facecolor=None, edgecolor=None,
fc='none', ec='#004080',
alpha=1.0, xlim=None, ylim=None,
ls='solid'):
"""
Plots the covariance ellipse for the 2D normal defined by (mean, cov)
`variance` is the normal sigma^2 that we want to plot. If list-like,
    ellipses for all of the given values will be plotted. E.g. [1,2] will plot the
sigma^2 = 1 and sigma^2 = 2 ellipses. Alternatively, use std for the
standard deviation, in which case `variance` will be ignored.
ellipse is a (angle,width,height) tuple containing the angle in radians,
and width and height radii.
You may provide either cov or ellipse, but not both.
Parameters
----------
mean : row vector like (2x1)
The mean of the normal
cov : ndarray-like
2x2 covariance matrix
variance : float, default 1, or iterable float, optional
Variance of the plotted ellipse. May specify std or interval instead.
If iterable, such as (1, 2**2, 3**2), then ellipses will be drawn
for all in the list.
std : float, or iterable float, optional
Standard deviation of the plotted ellipse. If specified, variance
is ignored, and interval must be `None`.
If iterable, such as (1, 2, 3), then ellipses will be drawn
for all in the list.
interval : float range [0,1), or iterable float, optional
Confidence interval for the plotted ellipse. For example, .68 (for
        68%) gives roughly 1 standard deviation. If specified, variance
is ignored and `std` must be `None`
If iterable, such as (.68, .95), then ellipses will be drawn
for all in the list.
ellipse: (float, float, float)
Instead of a covariance, plots an ellipse described by (angle, width,
height), where angle is in radians, and the width and height are the
minor and major sub-axis radii. `cov` must be `None`.
title: str, optional
title for the plot
axis_equal: bool, default=True
Use the same scale for the x-axis and y-axis to ensure the aspect
ratio is correct.
show_semiaxis: bool, default=False
Draw the semiaxis of the ellipse
show_center: bool, default=True
Mark the center of the ellipse with a cross
facecolor, fc: color, default=None
If specified, fills the ellipse with the specified color. `fc` is an
allowed abbreviation
edgecolor, ec: color, default=None
If specified, overrides the default color sequence for the edge color
of the ellipse. `ec` is an allowed abbreviation
alpha: float range [0,1], default=1.
alpha value for the ellipse
xlim: float or (float,float), default=None
specifies the limits for the x-axis
ylim: float or (float,float), default=None
specifies the limits for the y-axis
ls: str, default='solid':
line style for the edge of the ellipse
"""
from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
if cov is not None and ellipse is not None:
raise ValueError('You cannot specify both cov and ellipse')
if cov is None and ellipse is None:
raise ValueError('Specify one of cov or ellipse')
if facecolor is None:
facecolor = fc
if edgecolor is None:
edgecolor = ec
if cov is not None:
ellipse = covariance_ellipse(cov)
if axis_equal:
plt.axis('equal')
if title is not None:
plt.title(title)
ax = plt.gca()
angle = np.degrees(ellipse[0])
width = ellipse[1] * 2.
height = ellipse[2] * 2.
std = _std_tuple_of(variance, std, interval)
for sd in std:
e = Ellipse(xy=mean, width=sd*width, height=sd*height, angle=angle,
facecolor=facecolor,
edgecolor=edgecolor,
alpha=alpha,
lw=2, ls=ls)
ax.add_patch(e)
x, y = mean
if show_center:
plt.scatter(x, y, marker='+', color=edgecolor)
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
if show_semiaxis:
a = ellipse[0]
h, w = height/4, width/4
plt.plot([x, x+ h*cos(a+np.pi/2)], [y, y + h*sin(a+np.pi/2)])
plt.plot([x, x+ w*cos(a)], [y, y + w*sin(a)])
def norm_cdf(x_range, mu, var=1, std=None):
"""
Computes the probability that a Gaussian distribution lies
within a range of values.
Parameters
----------
x_range : (float, float)
tuple of range to compute probability for
mu : float
mean of the Gaussian
var : float, optional
variance of the Gaussian. Ignored if `std` is provided
std : float, optional
standard deviation of the Gaussian. This overrides the `var` parameter
Returns
-------
probability : float
probability that Gaussian is within x_range. E.g. .1 means 10%.
"""
if std is None:
std = math.sqrt(var)
return abs(norm.cdf(x_range[0], loc=mu, scale=std) -
norm.cdf(x_range[1], loc=mu, scale=std))
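# Illustrative check: norm_cdf((-1., 1.), mu=0., var=1.) is approximately 0.6827,
# the familiar ~68% of probability mass within one standard deviation of the mean.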
def _to_cov(x, n):
"""
If x is a scalar, returns a covariance matrix generated from it
as the identity matrix multiplied by x. The dimension will be nxn.
If x is already a 2D numpy array then it is returned unchanged.
Raises ValueError if not positive definite
"""
if np.isscalar(x):
if x < 0:
raise ValueError('covariance must be > 0')
return np.eye(n) * x
x = np.atleast_2d(x)
try:
# quickly find out if we are positive definite
np.linalg.cholesky(x)
    except np.linalg.LinAlgError:
        raise ValueError('covariance must be positive definite')
return x
def rand_student_t(df, mu=0, std=1):
"""
return random number distributed by student's t distribution with
`df` degrees of freedom with the specified mean and standard deviation.
"""
x = random.gauss(0, std)
y = 2.0*random.gammavariate(0.5 * df, 2.0)
return x / (math.sqrt(y / df)) + mu
def NEES(xs, est_xs, ps):
"""
Computes the normalized estimated error squared (NEES) test on a sequence
of estimates. The estimates are optimal if the mean error is zero and
the covariance matches the Kalman filter's covariance. If this holds,
then the mean of the NEES should be equal to or less than the dimension
of x.
Examples
--------
.. code-block: Python
xs = ground_truth()
est_xs, ps, _, _ = kf.batch_filter(zs)
NEES(xs, est_xs, ps)
Parameters
----------
xs : list-like
sequence of true values for the state x
est_xs : list-like
sequence of estimates from an estimator (such as Kalman filter)
ps : list-like
sequence of covariance matrices from the estimator
Returns
-------
errs : list of floats
list of NEES computed for each estimate
"""
est_err = xs - est_xs
errs = []
for x, p in zip(est_err, ps):
errs.append(np.dot(x.T, linalg.inv(p)).dot(x))
return errs
| mit |
harisbal/pandas | pandas/conftest.py | 2 | 13673 | import importlib
import os
import hypothesis
from hypothesis import strategies as st
import numpy as np
import pytest
from pandas.compat import PY3
import pandas.util._test_decorators as td
import pandas as pd
hypothesis.settings.register_profile(
"ci",
# Hypothesis timing checks are tuned for scalars by default, so we bump
# them from 200ms to 500ms per test case as the global default. If this
# is too short for a specific test, (a) try to make it faster, and (b)
# if it really is slow add `@settings(deadline=...)` with a working value,
# or `deadline=None` to entirely disable timeouts for that test.
deadline=500,
timeout=hypothesis.unlimited,
suppress_health_check=(hypothesis.HealthCheck.too_slow,)
)
hypothesis.settings.load_profile("ci")
def pytest_addoption(parser):
parser.addoption("--skip-slow", action="store_true",
help="skip slow tests")
parser.addoption("--skip-network", action="store_true",
help="skip network tests")
parser.addoption("--run-high-memory", action="store_true",
help="run high memory tests")
parser.addoption("--only-slow", action="store_true",
help="run only slow tests")
parser.addoption("--strict-data-files", action="store_true",
help="Fail if a test is skipped for missing data file.")
def pytest_runtest_setup(item):
if 'slow' in item.keywords and item.config.getoption("--skip-slow"):
pytest.skip("skipping due to --skip-slow")
if 'slow' not in item.keywords and item.config.getoption("--only-slow"):
pytest.skip("skipping due to --only-slow")
if 'network' in item.keywords and item.config.getoption("--skip-network"):
pytest.skip("skipping due to --skip-network")
if 'high_memory' in item.keywords and not item.config.getoption(
"--run-high-memory"):
pytest.skip(
"skipping high memory test since --run-high-memory was not set")
# Configurations for all tests and all test modules
@pytest.fixture(autouse=True)
def configure_tests():
pd.set_option('chained_assignment', 'raise')
# For running doctests: make np and pd names available
@pytest.fixture(autouse=True)
def add_imports(doctest_namespace):
doctest_namespace['np'] = np
doctest_namespace['pd'] = pd
@pytest.fixture(params=['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'])
def spmatrix(request):
from scipy import sparse
return getattr(sparse, request.param + '_matrix')
@pytest.fixture(params=[0, 1, 'index', 'columns'],
ids=lambda x: "axis {!r}".format(x))
def axis(request):
"""
Fixture for returning the axis numbers of a DataFrame.
"""
return request.param
axis_frame = axis
@pytest.fixture(params=[0, 'index'], ids=lambda x: "axis {!r}".format(x))
def axis_series(request):
"""
Fixture for returning the axis numbers of a Series.
"""
return request.param
@pytest.fixture
def ip():
"""
Get an instance of IPython.InteractiveShell.
Will raise a skip if IPython is not installed.
"""
pytest.importorskip('IPython', minversion="6.0.0")
from IPython.core.interactiveshell import InteractiveShell
return InteractiveShell()
@pytest.fixture(params=[True, False, None])
def observed(request):
""" pass in the observed keyword to groupby for [True, False]
This indicates whether categoricals should return values for
values which are not in the grouper [False / None], or only values which
    appear in the grouper [True]. [None] is supported for future compatibility
if we decide to change the default (and would need to warn if this
parameter is not passed)"""
return request.param
_all_arithmetic_operators = ['__add__', '__radd__',
'__sub__', '__rsub__',
'__mul__', '__rmul__',
'__floordiv__', '__rfloordiv__',
'__truediv__', '__rtruediv__',
'__pow__', '__rpow__',
'__mod__', '__rmod__']
if not PY3:
_all_arithmetic_operators.extend(['__div__', '__rdiv__'])
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations
"""
return request.param
_all_numeric_reductions = ['sum', 'max', 'min',
'mean', 'prod', 'std', 'var', 'median',
'kurt', 'skew']
@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
"""
Fixture for numeric reduction names
"""
return request.param
_all_boolean_reductions = ['all', 'any']
@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
"""
Fixture for boolean reduction names
"""
return request.param
_cython_table = pd.core.base.SelectionMixin._cython_table.items()
@pytest.fixture(params=list(_cython_table))
def cython_table_items(request):
return request.param
def _get_cython_table_params(ndframe, func_names_and_expected):
"""combine frame, functions from SelectionMixin._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
        The first item is the name of an NDFrame method, e.g. 'sum' or 'prod'.
The second item is the expected return value
Returns
-------
results : list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [(ndframe, func, expected) for func, name in _cython_table
if name == func_name]
return results
@pytest.fixture(params=['__eq__', '__ne__', '__le__',
'__lt__', '__ge__', '__gt__'])
def all_compare_operators(request):
"""
Fixture for dunder names for common compare operations
* >=
* >
* ==
* !=
* <
* <=
"""
return request.param
@pytest.fixture(params=[None, 'gzip', 'bz2', 'zip',
pytest.param('xz', marks=td.skip_if_no_lzma)])
def compression(request):
"""
Fixture for trying common compression types in compression tests
"""
return request.param
@pytest.fixture(params=['gzip', 'bz2', 'zip',
pytest.param('xz', marks=td.skip_if_no_lzma)])
def compression_only(request):
"""
Fixture for trying common compression types in compression tests excluding
uncompressed case
"""
return request.param
@pytest.fixture(params=[True, False])
def writable(request):
"""
    Fixture parametrizing whether or not an array is writable.
"""
return request.param
@pytest.fixture(scope='module')
def datetime_tz_utc():
from datetime import timezone
return timezone.utc
@pytest.fixture(params=['inner', 'outer', 'left', 'right'])
def join_type(request):
"""
Fixture for trying all types of join operations
"""
return request.param
@pytest.fixture
def datapath(request):
"""Get the path to a data file.
Parameters
----------
path : str
Path to the file, relative to ``pandas/tests/``
Returns
-------
path : path including ``pandas/tests``.
Raises
------
ValueError
If the path doesn't exist and the --strict-data-files option is set.
"""
BASE_PATH = os.path.join(os.path.dirname(__file__), 'tests')
def deco(*args):
path = os.path.join(BASE_PATH, *args)
if not os.path.exists(path):
if request.config.getoption("--strict-data-files"):
msg = "Could not find file {} and --strict-data-files is set."
raise ValueError(msg.format(path))
else:
msg = "Could not find {}."
pytest.skip(msg.format(path))
return path
return deco
@pytest.fixture
def iris(datapath):
"""The iris dataset as a DataFrame."""
return pd.read_csv(datapath('data', 'iris.csv'))
@pytest.fixture(params=['nlargest', 'nsmallest'])
def nselect_method(request):
"""
Fixture for trying all nselect methods
"""
return request.param
@pytest.fixture(params=['left', 'right', 'both', 'neither'])
def closed(request):
"""
Fixture for trying all interval closed parameters
"""
return request.param
@pytest.fixture(params=['left', 'right', 'both', 'neither'])
def other_closed(request):
"""
Secondary closed fixture to allow parametrizing over all pairs of closed
"""
return request.param
@pytest.fixture(params=[None, np.nan, pd.NaT, float('nan'), np.float('NaN')])
def nulls_fixture(request):
"""
Fixture for each null type in pandas
"""
return request.param
nulls_fixture2 = nulls_fixture # Generate cartesian product of nulls_fixture
@pytest.fixture(params=[None, np.nan, pd.NaT])
def unique_nulls_fixture(request):
"""
Fixture for each null type in pandas, each null type exactly once
"""
return request.param
# Generate cartesian product of unique_nulls_fixture:
unique_nulls_fixture2 = unique_nulls_fixture
TIMEZONES = [None, 'UTC', 'US/Eastern', 'Asia/Tokyo', 'dateutil/US/Pacific',
'dateutil/Asia/Singapore']
@td.parametrize_fixture_doc(str(TIMEZONES))
@pytest.fixture(params=TIMEZONES)
def tz_naive_fixture(request):
"""
Fixture for trying timezones including default (None): {0}
"""
return request.param
@td.parametrize_fixture_doc(str(TIMEZONES[1:]))
@pytest.fixture(params=TIMEZONES[1:])
def tz_aware_fixture(request):
"""
Fixture for trying explicit timezones: {0}
"""
return request.param
UNSIGNED_INT_DTYPES = ["uint8", "uint16", "uint32", "uint64"]
SIGNED_INT_DTYPES = [int, "int8", "int16", "int32", "int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
FLOAT_DTYPES = [float, "float32", "float64"]
COMPLEX_DTYPES = [complex, "complex64", "complex128"]
STRING_DTYPES = [str, 'str', 'U']
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = ALL_REAL_DTYPES + COMPLEX_DTYPES + STRING_DTYPES
@pytest.fixture(params=STRING_DTYPES)
def string_dtype(request):
"""Parametrized fixture for string dtypes.
* str
* 'str'
* 'U'
"""
return request.param
@pytest.fixture(params=FLOAT_DTYPES)
def float_dtype(request):
"""
Parameterized fixture for float dtypes.
* float32
* float64
"""
return request.param
@pytest.fixture(params=COMPLEX_DTYPES)
def complex_dtype(request):
"""
Parameterized fixture for complex dtypes.
* complex64
* complex128
"""
return request.param
@pytest.fixture(params=SIGNED_INT_DTYPES)
def sint_dtype(request):
"""
Parameterized fixture for signed integer dtypes.
* int8
* int16
* int32
* int64
"""
return request.param
@pytest.fixture(params=UNSIGNED_INT_DTYPES)
def uint_dtype(request):
"""
Parameterized fixture for unsigned integer dtypes.
* uint8
* uint16
* uint32
* uint64
"""
return request.param
@pytest.fixture(params=ALL_INT_DTYPES)
def any_int_dtype(request):
"""
Parameterized fixture for any integer dtypes.
* int8
* uint8
* int16
* uint16
* int32
* uint32
* int64
* uint64
"""
return request.param
@pytest.fixture(params=ALL_REAL_DTYPES)
def any_real_dtype(request):
"""
Parameterized fixture for any (purely) real numeric dtypes.
* int8
* uint8
* int16
* uint16
* int32
* uint32
* int64
* uint64
* float32
* float64
"""
return request.param
@pytest.fixture(params=ALL_NUMPY_DTYPES)
def any_numpy_dtype(request):
"""
Parameterized fixture for all numpy dtypes.
* int8
* uint8
* int16
* uint16
* int32
* uint32
* int64
* uint64
* float32
* float64
* complex64
* complex128
* str
* 'str'
* 'U'
"""
return request.param
@pytest.fixture
def mock():
"""
Fixture providing the 'mock' module.
Uses 'unittest.mock' for Python 3. Attempts to import the 3rd party 'mock'
package for Python 2, skipping if not present.
"""
if PY3:
return importlib.import_module("unittest.mock")
else:
return pytest.importorskip("mock")
# ----------------------------------------------------------------
# Global setup for tests using Hypothesis
# Registering these strategies makes them globally available via st.from_type,
# which is used for offsets in tests/tseries/offsets/test_offsets_properties.py
for name in 'MonthBegin MonthEnd BMonthBegin BMonthEnd'.split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(cls, st.builds(
cls,
n=st.integers(-99, 99),
normalize=st.booleans(),
))
for name in 'YearBegin YearEnd BYearBegin BYearEnd'.split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(cls, st.builds(
cls,
n=st.integers(-5, 5),
normalize=st.booleans(),
month=st.integers(min_value=1, max_value=12),
))
for name in 'QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd'.split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(cls, st.builds(
cls,
n=st.integers(-24, 24),
normalize=st.booleans(),
startingMonth=st.integers(min_value=1, max_value=12)
))
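# With the strategies above registered, property-based tests can draw offsets
# directly by type, e.g. with a decorator along the lines of
# @hypothesis.given(st.from_type(pd.tseries.offsets.MonthEnd)); the actual
# property tests live in tests/tseries/offsets/test_offsets_properties.py.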
| bsd-3-clause |
nextstrain/augur | augur/utils.py | 1 | 24646 | import argparse
import Bio
import Bio.Phylo
import gzip
import os, json, sys
import pandas as pd
import subprocess
import shlex
from contextlib import contextmanager
from treetime.utils import numeric_date
from collections import defaultdict
from pkg_resources import resource_stream
from io import TextIOWrapper
from .__version__ import __version__
from augur.io import open_file
from augur.util_support.color_parser import ColorParser
from augur.util_support.date_disambiguator import DateDisambiguator
from augur.util_support.metadata_file import MetadataFile
from augur.util_support.node_data_reader import NodeDataReader
from augur.util_support.shell_command_runner import ShellCommandRunner
class AugurException(Exception):
pass
def is_vcf(fname):
"""Convenience method to check if a file is a vcf file.
>>> is_vcf("./foo")
False
>>> is_vcf("./foo.vcf")
True
>>> is_vcf("./foo.vcf.GZ")
True
"""
return fname.lower().endswith(".vcf") or fname.lower().endswith(".vcf.gz")
def myopen(fname, mode):
if fname.endswith('.gz'):
import gzip
return gzip.open(fname, mode, encoding='utf-8')
else:
return open(fname, mode, encoding='utf-8')
def get_json_name(args, default=None):
if args.output_node_data:
return args.output_node_data
else:
if default:
print("WARNING: no name for the output file was specified. Writing results to %s."%default, file=sys.stderr)
return default
else:
raise ValueError("Please specify a name for the JSON file containing the results.")
def ambiguous_date_to_date_range(uncertain_date, fmt, min_max_year=None):
return DateDisambiguator(uncertain_date, fmt=fmt, min_max_year=min_max_year).range()
def read_metadata(fname, query=None):
return MetadataFile(fname, query).read()
def is_date_ambiguous(date, ambiguous_by="any"):
"""
Returns whether a given date string in the format of YYYY-MM-DD is ambiguous by a given part of the date (e.g., day, month, year, or any parts).
Parameters
----------
date : str
Date string in the format of YYYY-MM-DD
ambiguous_by : str
Field of the date string to test for ambiguity ("day", "month", "year", "any")
"""
date_components = date.split('-', 2)
if len(date_components) == 3:
year, month, day = date_components
elif len(date_components) == 2:
year, month = date_components
day = "XX"
else:
year = date_components[0]
month = "XX"
day = "XX"
# Determine ambiguity hierarchically such that, for example, an ambiguous
# month implicates an ambiguous day even when day information is available.
return any((
"X" in year,
"X" in month and ambiguous_by in ("any", "month", "day"),
"X" in day and ambiguous_by in ("any", "day")
))
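# For example (dates chosen for illustration):
# is_date_ambiguous("2020-XX-05", "month") -> True (month is masked)
# is_date_ambiguous("2020-03-05", "day") -> False
# is_date_ambiguous("2020-03", "day") -> True (the missing day is treated as "XX")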
def get_numerical_dates(meta_dict, name_col = None, date_col='date', fmt=None, min_max_year=None):
if fmt:
from datetime import datetime
numerical_dates = {}
for k,m in meta_dict.items():
v = m[date_col]
if type(v)!=str:
print("WARNING: %s has an invalid data string:"%k,v)
continue
elif 'XX' in v:
ambig_date = ambiguous_date_to_date_range(v, fmt, min_max_year)
if ambig_date is None or None in ambig_date:
numerical_dates[k] = [None, None] #don't send to numeric_date or will be set to today
else:
numerical_dates[k] = [numeric_date(d) for d in ambig_date]
else:
try:
numerical_dates[k] = numeric_date(datetime.strptime(v, fmt))
except:
numerical_dates[k] = None
else:
numerical_dates = {k:float(v) for k,v in meta_dict.items()}
return numerical_dates
class InvalidTreeError(Exception):
"""Represents an error loading a phylogenetic tree from a filename.
"""
pass
def read_tree(fname, min_terminals=3):
"""Safely load a tree from a given filename or raise an error if the file does
not contain a valid tree.
Parameters
----------
fname : str
name of a file containing a phylogenetic tree
min_terminals : int
minimum number of terminals required for the parsed tree as a sanity
check on the tree
Raises
------
InvalidTreeError
If the given file exists but does not seem to contain a valid tree format.
Returns
-------
Bio.Phylo :
BioPython tree instance
"""
T = None
supported_tree_formats = ["newick", "nexus"]
for fmt in supported_tree_formats:
try:
T = Bio.Phylo.read(fname, fmt)
# Check the sanity of the parsed tree to handle cases when non-tree
# data are still successfully parsed by BioPython. Too few terminals
# in a tree indicates that the input is not valid.
if T.count_terminals() < min_terminals:
T = None
else:
break
except ValueError:
# We cannot open the tree in the current format, so we will try
# another.
pass
# If the tree cannot be loaded, raise an error to that effect.
if T is None:
raise InvalidTreeError(
"Could not read the given tree %s using the following supported formats: %s" % (fname, ", ".join(supported_tree_formats))
)
return T
def read_node_data(fnames, tree=None):
return NodeDataReader(fnames, tree).read()
def write_json(data, file_name, indent=(None if os.environ.get("AUGUR_MINIFY_JSON") else 2), include_version=True):
"""
Write ``data`` as JSON to the given ``file_name``, creating parent directories
if necessary. The augur version is included as a top-level key "augur_version".
Parameters
----------
data : dict
data to write out to JSON
file_name : str
file name to write to
indent : int or None, optional
JSON indentation level. Default is `None` if the environment variable `AUGUR_MINIFY_JSON`
        is truthy, else 2.
include_version : bool, optional
Include the augur version. Default: `True`.
Raises
------
OSError
"""
#in case parent folder does not exist yet
parent_directory = os.path.dirname(file_name)
if parent_directory and not os.path.exists(parent_directory):
try:
os.makedirs(parent_directory)
except OSError: #Guard against race condition
if not os.path.isdir(parent_directory):
raise
if include_version:
data["generated_by"] = {"program": "augur", "version": get_augur_version()}
with open(file_name, 'w', encoding='utf-8') as handle:
json.dump(data, handle, indent=indent, sort_keys=True)
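# Typical call (illustrative): write_json({"strains": ["A", "B"]}, "results/meta.json")
# creates the results/ directory if needed and stamps the output with a
# "generated_by" block recording the augur version.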
def load_features(reference, feature_names=None):
#read in appropriately whether GFF or Genbank
#checks explicitly for GFF otherwise assumes Genbank
if not os.path.isfile(reference):
print("ERROR: reference sequence not found. looking for", reference)
return None
features = {}
if '.gff' in reference.lower():
        #looks for 'gene' and 'locus_tag' as best for TB
try:
from BCBio import GFF #Package name is confusing - tell user exactly what they need!
except ImportError:
print("ERROR: Package BCBio.GFF not found! Please install using \'pip install bcbio-gff\' before re-running.")
return None
limit_info = dict( gff_type = ['gene'] )
with open(reference, encoding='utf-8') as in_handle:
for rec in GFF.parse(in_handle, limit_info=limit_info):
for feat in rec.features:
if feature_names is not None: #check both tags; user may have used either
if "gene" in feat.qualifiers and feat.qualifiers["gene"][0] in feature_names:
fname = feat.qualifiers["gene"][0]
elif "locus_tag" in feat.qualifiers and feat.qualifiers["locus_tag"][0] in feature_names:
fname = feat.qualifiers["locus_tag"][0]
else:
fname = None
else:
if "gene" in feat.qualifiers:
fname = feat.qualifiers["gene"][0]
else:
fname = feat.qualifiers["locus_tag"][0]
if fname:
features[fname] = feat
if feature_names is not None:
for fe in feature_names:
if fe not in features:
print("Couldn't find gene {} in GFF or GenBank file".format(fe))
else:
from Bio import SeqIO
for feat in SeqIO.read(reference, 'genbank').features:
if feat.type=='CDS':
if "locus_tag" in feat.qualifiers:
fname = feat.qualifiers["locus_tag"][0]
if feature_names is None or fname in feature_names:
features[fname] = feat
elif "gene" in feat.qualifiers:
fname = feat.qualifiers["gene"][0]
if feature_names is None or fname in feature_names:
features[fname] = feat
elif feat.type=='source': #read 'nuc' as well for annotations - need start/end of whole!
features['nuc'] = feat
return features
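# Hedged sketch of load_features; "reference.gb" and the gene names are
# placeholders. A GenBank reference yields a dict keyed by locus tag or gene
# name, plus a 'nuc' entry spanning the whole source feature.
def _example_load_features_usage():
    features = load_features("reference.gb", feature_names=["gag", "pol"])
    if features:
        for name, feature in features.items():
            print(name, int(feature.location.start), int(feature.location.end))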
def read_config(fname):
if not (fname and os.path.isfile(fname)):
print("ERROR: config file %s not found."%fname)
return defaultdict(dict)
try:
with open(fname, 'rb') as ifile:
config = json.load(ifile)
except json.decoder.JSONDecodeError as err:
print("FATAL ERROR:")
print("\tCouldn't parse the JSON file {}".format(fname))
print("\tError message: '{}'".format(err.msg))
print("\tLine number: '{}'".format(err.lineno))
print("\tColumn number: '{}'".format(err.colno))
print("\tYou must correct this file in order to proceed.")
sys.exit(2)
return config
def read_lat_longs(overrides=None, use_defaults=True):
coordinates = {}
    # TODO: make parsing of tsv files more robust while allowing whitespace delimiting for backwards compatibility
def add_line_to_coordinates(line):
if line.startswith('#') or line.strip() == "":
return
        fields = line.strip().split() if '\t' not in line else line.strip().split('\t')
if len(fields) == 4:
geo_field, loc = fields[0].lower(), fields[1].lower()
lat, long = float(fields[2]), float(fields[3])
coordinates[(geo_field, loc)] = {
"latitude": lat,
"longitude": long
}
else:
print("WARNING: geo-coordinate file contains invalid line. Please make sure not to mix tabs and spaces as delimiters (use only tabs):",line)
if use_defaults:
with resource_stream(__package__, "data/lat_longs.tsv") as stream:
with TextIOWrapper(stream, "utf-8") as defaults:
for line in defaults:
add_line_to_coordinates(line)
if overrides:
if os.path.isfile(overrides):
with open(overrides, encoding='utf-8') as ifile:
for line in ifile:
add_line_to_coordinates(line)
else:
print("WARNING: input lat/long file %s not found." % overrides)
return coordinates
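# Sketch of combining the bundled defaults with a hypothetical override file;
# keys of the returned dict are (geo_field, location) tuples.
def _example_read_lat_longs_usage():
    coordinates = read_lat_longs(overrides="config/lat_longs.tsv")
    entry = coordinates.get(("country", "brazil"))
    if entry:
        print(entry["latitude"], entry["longitude"])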
def read_colors(overrides=None, use_defaults=True):
return ColorParser(mapping_filename=overrides, use_defaults=use_defaults).mapping
def write_VCF_translation(prot_dict, vcf_file_name, ref_file_name):
"""
    Writes out a VCF-style file (minimally compatible with vcftools and PyVCF)
    of the amino-acid differences between sequences and the reference.
This is a similar format created/used by read_in_vcf except that there is one
of these dicts (with sequences, reference, positions) for EACH gene.
Also writes out a fasta of the reference alignment.
EBH 12 Dec 2017
"""
import numpy as np
#for the header
seqNames = list(prot_dict[list(prot_dict.keys())[0]]['sequences'].keys())
#prepare the header of the VCF & write out
header=["#CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT"]+seqNames
with open(vcf_file_name, 'w', encoding='utf-8') as the_file:
the_file.write( "##fileformat=VCFv4.2\n"+
"##source=NextStrain_Protein_Translation\n"+
"##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n")
the_file.write("\t".join(header)+"\n")
refWrite = []
vcfWrite = []
#go through for every gene/protein
for fname, prot in prot_dict.items():
sequences = prot['sequences']
ref = prot['reference']
positions = prot['positions']
#write out the reference fasta
refWrite.append(">"+fname)
refWrite.append(ref)
#go through every variable position
        #There are no deletions here, so it's simpler than for VCF nuc sequences!
for pi in positions:
pos = pi+1 #change numbering to match VCF not python
refb = ref[pi] #reference base at this position
#try/except is (much) faster than list comprehension!
pattern = []
for k,v in sequences.items():
try:
pattern.append(sequences[k][pi])
except KeyError:
pattern.append('.')
pattern = np.array(pattern)
#get the list of ALTs - minus any '.'!
uniques = np.unique(pattern)
uniques = uniques[np.where(uniques!='.')]
#Convert bases to the number that matches the ALT
j=1
for u in uniques:
pattern[np.where(pattern==u)[0]] = str(j)
j+=1
#Now convert these calls to #/# (VCF format)
calls = [ j+"/"+j if j!='.' else '.' for j in pattern ]
if len(uniques)==0:
print("UNEXPECTED ERROR WHILE CONVERTING TO VCF AT POSITION {}".format(str(pi)))
break
#put it all together and write it out
output = [fname, str(pos), ".", refb, ",".join(uniques), ".", "PASS", ".", "GT"] + calls
vcfWrite.append("\t".join(output))
#write it all out
with open(ref_file_name, 'w', encoding='utf-8') as the_file:
the_file.write("\n".join(refWrite))
with open(vcf_file_name, 'a', encoding='utf-8') as the_file:
the_file.write("\n".join(vcfWrite))
if vcf_file_name.lower().endswith('.gz'):
import os
#must temporarily remove .gz ending, or gzip won't zip it!
os.rename(vcf_file_name, vcf_file_name[:-3])
call = ["gzip", vcf_file_name[:-3]]
run_shell_command(" ".join(call), raise_errors = True)
shquote = shlex.quote
def run_shell_command(cmd, raise_errors=False, extra_env=None):
"""
Run the given command string via Bash with error checking.
Returns True if the command exits normally. Returns False if the command
exits with failure and "raise_errors" is False (the default). When
"raise_errors" is True, exceptions are rethrown.
If an *extra_env* mapping is passed, the provided keys and values are
overlayed onto the default subprocess environment.
"""
return ShellCommandRunner(cmd, raise_errors=raise_errors, extra_env=extra_env).run()
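# Minimal sketch of run_shell_command; the command is a harmless placeholder
# and extra_env only augments the inherited environment.
def _example_run_shell_command_usage():
    ok = run_shell_command("echo 'hello from augur'", extra_env={"EXAMPLE_VAR": "1"})
    if not ok:
        print("command failed", file=sys.stderr)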
def first_line(text):
"""
Returns the first line of the given text, ignoring leading and trailing
whitespace.
"""
return text.strip().splitlines()[0]
def available_cpu_cores(fallback: int = 1) -> int:
"""
Returns the number (an int) of CPU cores available to this **process**, if
determinable, otherwise the number of CPU cores available to the
**computer**, if determinable, otherwise the *fallback* number (which
defaults to 1).
"""
try:
# Note that this is the correct function to use, not os.cpu_count(), as
# described in the latter's documentation.
#
# The reason, which the documentation does not detail, is that
# processes may be pinned or restricted to certain CPUs by setting
# their "affinity". This is not typical except in high-performance
# computing environments, but if it is done, then a computer with say
# 24 total cores may only allow our process to use 12. If we tried to
# naively use all 24, we'd end up with two threads across the 12 cores.
# This would degrade performance rather than improve it!
return len(os.sched_getaffinity(0))
    except (AttributeError, OSError):
        # sched_getaffinity() may be unavailable on this platform;
        # cpu_count() returns None if the value is indeterminable.
return os.cpu_count() or fallback
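# Quick sketch contrasting the process-level count returned above with the
# machine-wide count reported by os.cpu_count().
def _example_available_cpu_cores_usage():
    print("cores available to this process:", available_cpu_cores())
    print("cores available to the machine :", os.cpu_count())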
def nthreads_value(value):
"""
Argument value validation and casting function for --nthreads.
"""
if value.lower() == 'auto':
return available_cpu_cores()
try:
return int(value)
except ValueError:
raise argparse.ArgumentTypeError("'%s' is not an integer or the word 'auto'" % value) from None
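# Sketch of wiring nthreads_value into an argument parser so that both
# "--nthreads 4" and "--nthreads auto" are accepted; the parser is throwaway.
def _example_nthreads_argument():
    parser = argparse.ArgumentParser()
    parser.add_argument("--nthreads", type=nthreads_value, default=1)
    args = parser.parse_args(["--nthreads", "auto"])
    print(args.nthreads)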
def get_parent_name_by_child_name_for_tree(tree):
'''
Return dictionary mapping child node names to parent node names
'''
parents = {}
for clade in tree.find_clades(order='level'):
for child in clade:
parents[child.name] = clade.name
return parents
def annotate_parents_for_tree(tree):
"""Annotate each node in the given tree with its parent.
>>> import io
>>> tree = Bio.Phylo.read(io.StringIO("(A, (B, C))"), "newick")
>>> not any([hasattr(node, "parent") for node in tree.find_clades()])
True
>>> tree = annotate_parents_for_tree(tree)
>>> tree.root.parent is None
True
>>> all([hasattr(node, "parent") for node in tree.find_clades()])
True
"""
tree.root.parent = None
for node in tree.find_clades(order="level"):
for child in node.clades:
child.parent = node
# Return the tree.
return tree
def json_to_tree(json_dict, root=True):
"""Returns a Bio.Phylo tree corresponding to the given JSON dictionary exported
by `tree_to_json`.
Assigns links back to parent nodes for the root of the tree.
Test opening a JSON from augur export v1.
>>> import json
>>> json_fh = open("tests/data/json_tree_to_nexus/flu_h3n2_ha_3y_tree.json", "r")
>>> json_dict = json.load(json_fh)
>>> tree = json_to_tree(json_dict)
>>> tree.name
'NODE_0002020'
>>> len(tree.clades)
2
>>> tree.clades[0].name
'NODE_0001489'
>>> hasattr(tree, "attr")
True
>>> "dTiter" in tree.attr
True
>>> tree.clades[0].parent.name
'NODE_0002020'
>>> tree.clades[0].branch_length > 0
True
Test opening a JSON from augur export v2.
>>> json_fh = open("tests/data/zika.json", "r")
>>> json_dict = json.load(json_fh)
>>> tree = json_to_tree(json_dict)
>>> hasattr(tree, "name")
True
>>> len(tree.clades) > 0
True
>>> tree.clades[0].branch_length > 0
True
"""
# Check for v2 JSON which has combined metadata and tree data.
if root and "meta" in json_dict and "tree" in json_dict:
json_dict = json_dict["tree"]
node = Bio.Phylo.Newick.Clade()
# v1 and v2 JSONs use different keys for strain names.
if "name" in json_dict:
node.name = json_dict["name"]
else:
node.name = json_dict["strain"]
if "children" in json_dict:
# Recursively add children to the current node.
node.clades = [json_to_tree(child, root=False) for child in json_dict["children"]]
# Assign all non-children attributes.
for attr, value in json_dict.items():
if attr != "children":
setattr(node, attr, value)
# Only v1 JSONs support a single `attr` attribute.
if hasattr(node, "attr"):
node.numdate = node.attr.get("num_date")
node.branch_length = node.attr.get("div")
if "translations" in node.attr:
node.translations = node.attr["translations"]
elif hasattr(node, "node_attrs"):
node.branch_length = node.node_attrs.get("div")
if root:
node = annotate_parents_for_tree(node)
return node
def get_augur_version():
"""
Returns a string of the current augur version.
"""
return __version__
def read_bed_file(bed_file):
"""Read a BED file and return a list of excluded sites.
Note: This function assumes the given file is a BED file. On parsing
failures, it will attempt to skip the first line and retry, but no
other error checking is attempted. Incorrectly formatted files will
raise errors.
Parameters
----------
bed_file : str
Path to the BED file
    Returns
    -------
list[int]:
Sorted list of unique zero-indexed sites
"""
mask_sites = []
try:
bed = pd.read_csv(bed_file, sep='\t', header=None, usecols=[1,2],
dtype={1:int,2:int})
except ValueError:
# Check if we have a header row. Otherwise, just fail.
bed = pd.read_csv(bed_file, sep='\t', header=None, usecols=[1,2],
dtype={1:int,2:int}, skiprows=1)
print("Skipped row 1 of %s, assuming it is a header." % bed_file)
for _, row in bed.iterrows():
mask_sites.extend(range(row[1], row[2]))
return sorted(set(mask_sites))
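# Hedged sketch: two overlapping BED intervals (zero-based, half-open) are
# flattened into a sorted list of unique zero-indexed sites.
def _example_read_bed_file_usage():
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".bed", delete=False) as bed:
        bed.write("chrom\t2\t4\nchrom\t3\t6\n")
    print(read_bed_file(bed.name))  # [2, 3, 4, 5]
    os.remove(bed.name)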
def read_mask_file(mask_file):
"""Read a masking file and return a list of excluded sites.
Masking files have a single masking site per line, either alone
or as the second column of a tab-separated file. These sites
are assumed to be one-indexed, NOT zero-indexed. Incorrectly
formatted lines will be skipped.
Parameters
----------
mask_file : str
Path to the masking file
    Returns
    -------
list[int]:
Sorted list of unique zero-indexed sites
"""
mask_sites = []
with open(mask_file, encoding='utf-8') as mf:
for idx, line in enumerate(l.strip() for l in mf.readlines()):
if "\t" in line:
line = line.split("\t")[1]
try:
mask_sites.append(int(line) - 1)
except ValueError as err:
print("Could not read line %s of %s: '%s' - %s" %
(idx, mask_file, line, err), file=sys.stderr)
raise
return sorted(set(mask_sites))
def load_mask_sites(mask_file):
"""Load masking sites from either a BED file or a masking file.
Parameters
----------
mask_file: str
Path to the BED or masking file
Returns
-------
list[int]
Sorted list of unique zero-indexed sites
"""
if mask_file.lower().endswith(".bed"):
mask_sites = read_bed_file(mask_file)
else:
mask_sites = read_mask_file(mask_file)
print("%d masking sites read from %s" % (len(mask_sites), mask_file))
return mask_sites
VALID_NUCLEOTIDES = { # http://reverse-complement.com/ambiguity.html
"A", "G", "C", "T", "U", "N", "R", "Y", "S", "W", "K", "M", "B", "V", "D", "H", "-",
"a", "g", "c", "t", "u", "n", "r", "y", "s", "w", "k", "m", "b", "v", "d", "h", "-"
}
def read_strains(*files, comment_char="#"):
"""Reads strain names from one or more plain text files and returns the
set of distinct strains.
Strain names can be commented with full-line or inline comments. For
example, the following is a valid strain names file:
# this is a comment at the top of the file
strain1 # exclude strain1 because it isn't sequenced properly
strain2
# this is an empty line that will be ignored.
Parameters
----------
files : one or more str
one or more names of text files with one strain name per line
Returns
-------
set :
strain names from the given input files
"""
strains = set()
for input_file in files:
with open_file(input_file, 'r') as ifile:
for line in ifile:
# Allow comments anywhere in a given line.
strain_name = line.split(comment_char)[0].strip()
if len(strain_name) > 0:
strains.add(strain_name)
return strains
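# Sketch of reading strain names from two hypothetical include lists; the
# result is a single set, so duplicates across files collapse.
def _example_read_strains_usage():
    strains = read_strains("include_batch1.txt", "include_batch2.txt")
    print(len(strains), "distinct strains")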
| agpl-3.0 |
DinoCow/airflow | tests/test_utils/perf/scheduler_ops_metrics.py | 7 | 7513 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
import pandas as pd
from airflow import settings
from airflow.configuration import conf
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.utils import timezone
from airflow.utils.state import State
SUBDIR = 'scripts/perf/dags'
DAG_IDS = ['perf_dag_1', 'perf_dag_2']
MAX_RUNTIME_SECS = 6
class SchedulerMetricsJob(SchedulerJob):
"""
This class extends SchedulerJob to instrument the execution performance of
task instances contained in each DAG. We want to know if any DAG
is starved of resources, and this will be reflected in the stats printed
out at the end of the test run. The following metrics will be instrumented
for each task instance (dag_id, task_id, execution_date) tuple:
    1. Queuing delay - time taken from starting the executor until the task
    instance is added to the executor queue.
    2. Start delay - time taken from starting the executor until the task
    instance starts execution.
    3. Land time - time taken from starting the executor until the task
    instance completes.
4. Duration - time taken for executing the task instance.
The DAGs implement bash operators that call the system wait command. This
is representative of typical operators run on Airflow - queries that are
run on remote systems and spend the majority of their time on I/O wait.
To Run:
$ python scripts/perf/scheduler_ops_metrics.py [timeout]
You can specify timeout in seconds as an optional parameter.
Its default value is 6 seconds.
"""
__mapper_args__ = {'polymorphic_identity': 'SchedulerMetricsJob'}
def __init__(self, dag_ids, subdir, max_runtime_secs):
self.max_runtime_secs = max_runtime_secs
super().__init__(dag_ids=dag_ids, subdir=subdir)
def print_stats(self):
"""
Print operational metrics for the scheduler test.
"""
session = settings.Session()
TI = TaskInstance
tis = session.query(TI).filter(TI.dag_id.in_(DAG_IDS)).all()
successful_tis = [x for x in tis if x.state == State.SUCCESS]
ti_perf = [
(
ti.dag_id,
ti.task_id,
ti.execution_date,
(ti.queued_dttm - self.start_date).total_seconds(),
(ti.start_date - self.start_date).total_seconds(),
(ti.end_date - self.start_date).total_seconds(),
ti.duration,
)
for ti in successful_tis
]
ti_perf_df = pd.DataFrame(
ti_perf,
columns=[
'dag_id',
'task_id',
'execution_date',
'queue_delay',
'start_delay',
'land_time',
'duration',
],
)
print('Performance Results')
print('###################')
for dag_id in DAG_IDS:
print(f'DAG {dag_id}')
print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])
print('###################')
if len(tis) > len(successful_tis):
print("WARNING!! The following task instances haven't completed")
print(
pd.DataFrame(
[
(ti.dag_id, ti.task_id, ti.execution_date, ti.state)
for ti in filter(lambda x: x.state != State.SUCCESS, tis)
],
columns=['dag_id', 'task_id', 'execution_date', 'state'],
)
)
session.commit()
def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super().heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session.query(TI).filter(TI.dag_id.in_(DAG_IDS)).filter(TI.state.in_([State.SUCCESS])).all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
        # the tasks in perf_dag_1 and perf_dag_2 have a daily schedule interval.
num_task_instances = sum(
[(timezone.utcnow() - task.start_date).days for dag in dags for task in dag.tasks]
)
if (
len(successful_tis) == num_task_instances
or (timezone.utcnow() - self.start_date).total_seconds() > self.max_runtime_secs
):
if len(successful_tis) == num_task_instances:
self.log.info("All tasks processed! Printing stats.")
else:
self.log.info("Test timeout reached. Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit()
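# Illustrative sketch (not used by the test itself) of how the per-task
# metrics described in SchedulerMetricsJob are derived from task instance
# timestamps relative to the job start time.
def _example_metric_derivation(ti, job_start_date):
    queue_delay = (ti.queued_dttm - job_start_date).total_seconds()
    start_delay = (ti.start_date - job_start_date).total_seconds()
    land_time = (ti.end_date - job_start_date).total_seconds()
    return queue_delay, start_delay, land_time, ti.duration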
def clear_dag_runs():
"""
Remove any existing DAG runs for the perf test DAGs.
"""
session = settings.Session()
drs = (
session.query(DagRun)
.filter(
DagRun.dag_id.in_(DAG_IDS),
)
.all()
)
for dr in drs:
logging.info('Deleting DagRun :: %s', dr)
session.delete(dr)
def clear_dag_task_instances():
"""
Remove any existing task instances for the perf test DAGs.
"""
session = settings.Session()
TI = TaskInstance
tis = session.query(TI).filter(TI.dag_id.in_(DAG_IDS)).all()
for ti in tis:
logging.info('Deleting TaskInstance :: %s', ti)
session.delete(ti)
session.commit()
def set_dags_paused_state(is_paused):
"""
Toggle the pause state of the DAGs in the test.
"""
session = settings.Session()
dag_models = session.query(DagModel).filter(DagModel.dag_id.in_(DAG_IDS))
for dag_model in dag_models:
logging.info('Setting DAG :: %s is_paused=%s', dag_model, is_paused)
dag_model.is_paused = is_paused
session.commit()
def main():
"""
Run the scheduler metrics jobs after loading the test configuration and
clearing old instances of dags and tasks
"""
max_runtime_secs = MAX_RUNTIME_SECS
if len(sys.argv) > 1:
try:
max_runtime_secs = int(sys.argv[1])
if max_runtime_secs < 1:
raise ValueError
except ValueError:
logging.error('Specify a positive integer for timeout.')
sys.exit(1)
conf.load_test_config()
set_dags_paused_state(False)
clear_dag_runs()
clear_dag_task_instances()
job = SchedulerMetricsJob(dag_ids=DAG_IDS, subdir=SUBDIR, max_runtime_secs=max_runtime_secs)
job.run()
if __name__ == "__main__":
main()
| apache-2.0 |
hsiaoyi0504/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| bsd-3-clause |
kashyapchhatbar/CLASHChimeras | build/lib/clashchimeras/parsers.py | 2 | 34332 | import csv
import gzip
import logging
import mmap
import os
import sys
import textwrap
from collections import Counter, defaultdict
from itertools import groupby
from operator import itemgetter
import pandas as pd
import pyfaidx
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
import clashchimeras
logger = logging.getLogger('root')
class GFF:
"""GFF file parser for mirbase gff3 file
This class uses memory-mapped file object to read a mirbase gff3 file. It
contains methods to read, process a gff3 file and return genomic coordinates
Attributes:
fileName: A mirbase gff3 file path
"""
def __init__(self, fileName=None):
self.features = {}
self.fileName = fileName
def read(self, featureType='miRNA_primary_transcript'):
"""Reads gff3 file provided during class initialization
Stores the byte positions of every feature in a dict object named
self.features
Keyword Args:
featureType: Feature type of a gff3 record, the third element of every
record in the file. Please change this if you want to store mature
form of microRNA, by default it uses primary transcript
(default 'miRNA_primary_transcript')
"""
logger.info('Reading %s' % self.fileName)
self.fileHandle = open(self.fileName, 'r+b')
bytePosition = self.fileHandle.tell()
for line in self.fileHandle:
row = line.decode('utf-8').rstrip().split("\t")
if not row[0].startswith("#") and row[2] == featureType:
attributes = row[-1].split(";")
for attribute in attributes:
if attribute.startswith('Name'):
mirbase_name = attribute.split("=")[-1]
self.features[mirbase_name] = bytePosition
bytePosition = self.fileHandle.tell()
self.fileHandle.close()
logger.debug('Reading %s finished' % self.fileName)
def process(self, name):
"""A method to return a Record object providing genomic information
Args:
name: A valid miRNA_primary_transcript name
Returns:
An object Record containing scaffold, start, end, strand, mirbase_id and
mirbase_name as its variables for access
"""
self.fileHandle = open(self.fileName, 'r+b')
self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
self.mm.seek(self.features[name])
row = self.mm.readline().decode('utf-8').rstrip().split("\t")
attributes = row[-1].split(";")
for attribute in attributes:
if attribute.startswith("ID"):
_id = attribute.split("=")[-1]
elif attribute.startswith("Name"):
_name = attribute.split("=")[-1]
record = Record(scaffold=row[0], start=int(row[3]), end=int(row[4]),
strand=row[6], mirbase_id=_id, mirbase_name=_name)
self.fileHandle.close()
return record
def coordinates(self, name, start=None, end=None):
"""A method to return a bed record containing genomic coordinates for the
aligned segment
Keyword Args:
start: The alignment start position of the cDNA molecule or the relative
start of the particular molecule
end: The alignment end position in the cDNA molecule or the relative end
of the particular molecule
Args:
name: A valid miRNA_primary_transcript name
Returns:
A tuple of strings containing elements for a bed record
"""
record = self.process(name)
if not start and not end:
start = 1
end = record.end - record.start + 1
positions = {}
match_positions = []
if record.strand == '+':
_start = 1
for relative, actual in enumerate(range(record.start - 1, record.end),
start=_start):
positions[relative] = actual
for pos in range(start, end + 1):
match_positions.append(positions[pos])
return [(record.scaffold, min(match_positions), max(match_positions) + 1,
record.mirbase_name, 0, record.strand)]
elif record.strand == '-':
_start = 1
for relative, actual in enumerate(reversed(range(record.start - 1,
record.end)), start=_start):
positions[relative] = actual
for pos in range(start, end + 1):
match_positions.append(positions[pos])
return [(record.scaffold, min(match_positions), max(match_positions) + 1,
record.mirbase_name, 0, record.strand)]
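# Hedged usage sketch for the GFF parser above; the gff3 path and the
# primary-transcript name are placeholders for a real miRBase annotation.
def _example_gff_usage():
  gff = GFF(fileName="mirbase/hsa.gff3")
  gff.read()
  for scaffold, start, end, name, score, strand in gff.coordinates(
      "hsa-mir-100", start=1, end=22):
    print(scaffold, start, end, name, score, strand)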
class GTF:
"""GTF file parser for gencode gtf file
This class uses memory-mapped file object to read a gencode gtf file. It
contains methods to read, process a gtf file and return genomic coordinates
Attributes:
fileName: A gencode gtf file path
"""
def __init__(self, fileName=None):
self.features = defaultdict(list)
self.biotypeFeatures = defaultdict(list)
self.geneFeatures = defaultdict(list)
self.fileName = fileName
self.geneIds = {}
def readBiotype(self, featureType='exon', biotype=None):
logger.info('Reading %s' % self.fileName)
self.fileHandle = open(self.fileName, 'r+b')
for line in self.fileHandle:
row = line.decode('utf-8').rstrip().split("\t")
if not row[0].startswith("#") and row[2] == featureType:
attributes = row[-1].split("; ")
havana_transcript = '-'
havana_gene = '-'
exon_number = '0'
for attribute in attributes:
if attribute.startswith("transcript_id"):
transcript_id = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("transcript_type"):
transcript_type = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("exon_number"):
exon_number = int(attribute.split(" ")[-1])
elif attribute.startswith("havana_gene"):
havana_gene = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("havana_transcript"):
havana_transcript = attribute.split(" ")[-1][1:-2]
elif attribute.startswith("gene_id"):
gene_id = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("gene_name"):
gene_name = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("transcript_name"):
transcript_name = attribute.split(" ")[-1][1:-1]
if biotype == 'tRNA':
if transcript_type == "tRNAscan":
self.biotypeFeatures[transcript_id].append((exon_number, row[0],
int(row[3]), int(row[4]),
row[6], gene_id,
havana_gene,
havana_transcript,
transcript_name,
gene_name))
else:
if transcript_type == biotype:
self.biotypeFeatures[transcript_id].append((exon_number, row[0],
int(row[3]), int(row[4]),
row[6], gene_id,
havana_gene,
havana_transcript,
transcript_name,
gene_name))
self.fileHandle.close()
def read(self, featureType='exon'):
"""Reads gtf file provided during class initialization
Stores the byte positions of every feature in a defaultdict(list) object
named self.features
Keyword Args:
featureType: Feature type of a gtf record, the third element of every
record in the file. Please change this if you want to get specific
records (e.g. 'UTR') (default 'exon')
"""
logger.info('Reading %s' % self.fileName)
self.fileHandle = open(self.fileName, 'r+b')
bytePosition = self.fileHandle.tell()
for line in self.fileHandle:
row = line.decode('utf-8').rstrip().split("\t")
if not row[0].startswith("#") and row[2] == featureType:
attributes = row[-1].split("; ")
for attribute in attributes:
if attribute.startswith("transcript_id"):
transcript_id = attribute.split(" ")[-1][1:-1]
self.features[transcript_id].append(bytePosition)
self.geneIds[transcript_id] = gene_id
if attribute.startswith("gene_id"):
gene_id = attribute.split(" ")[-1][1:-1]
bytePosition = self.fileHandle.tell()
self.fileHandle.close()
self.fileHandle = open(self.fileName, 'r+b')
bytePosition = self.fileHandle.tell()
for line in self.fileHandle:
row = line.decode('utf-8').rstrip().split("\t")
if not row[0].startswith("#") and row[2] == featureType:
attributes = row[-1].split("; ")
for attribute in attributes:
if attribute.startswith("gene_id"):
gene_id = attribute.split(" ")[-1][1:-1]
self.geneFeatures[gene_id].append(bytePosition)
bytePosition = self.fileHandle.tell()
self.fileHandle.close()
logger.debug('Reading %s finished' % self.fileName)
def process(self, name):
"""A method to return a Record object providing genomic information
Args:
name: A valid gencode transcript_id
Returns:
An object Record containing scaffold, start, end, strand, mirbase_id and
mirbase_name as its variables for access
"""
self.fileHandle = open(self.fileName, 'r+b')
self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
positions = self.features[name]
for position in positions:
self.mm.seek(position)
row = self.mm.readline().decode('utf-8').rstrip().split("\t")
attributes = row[-1].split("; ")
_eid = '-'
_enb = '0'
for attribute in attributes:
if attribute.startswith("transcript_type"):
_tt = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("transcript_id"):
_tid = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("exon_id"):
_eid = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("exon_number"):
_enb = int(attribute.split(" ")[-1])
elif attribute.startswith("gene_name"):
_gn = attribute.split(" ")[-1][1:-1]
record = Record(scaffold=row[0], start=int(row[3]), end=int(row[4]),
strand=row[6], transcript_type=_tt, transcript_id=_tid, exon_id=_eid,
exon_number=_enb, gene_name=_gn)
yield record
self.fileHandle.close()
def geneExonicRegions(self, df):
"""Given a DataFrame with the exon coordinates from Gencode for a single
    gene, yield Record objects for the merged exonic regions of that gene.
"""
scaffold = df.iloc[0].scaffold
strand = df.iloc[0].strand
gene_type = df.iloc[0].gene_type
gene_id = df.iloc[0].gene_id
gene_name = df.iloc[0].gene_name
start = df.start.min()
end = df.end.max()
bp = [False] * (end - start + 1)
for i in range(df.shape[0]):
s = df.iloc[i]['start'] - start
e = df.iloc[i]['end'] - start + 1
bp[s:e] = [True] * (e - s)
regions = list(range(start, end + 1))
groups = []
for i, j in groupby(bp):
groups.append((i, len(list(j))))
e_start = 0
for i in groups:
e_end = e_start + i[1]
if i[0]:
record = Record(scaffold=scaffold, start=regions[e_start],
end=regions[e_end - 1], gene_type=gene_type, gene_id=gene_id,
gene_name=gene_name, strand=strand)
yield record
e_start += i[1]
def geneProcess(self, name):
"""A method to return a Record object providing genomic information
Args:
name: A valid gencode gene_id
Returns:
An object Record containing scaffold, start, end, strand, mirbase_id and
mirbase_name as its variables for access
"""
self.fileHandle = open(self.fileName, 'r+b')
self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
positions = self.geneFeatures[name]
exons = []
for position in positions:
self.mm.seek(position)
row = self.mm.readline().decode('utf-8').rstrip().split("\t")
attributes = row[-1].split("; ")
for attribute in attributes:
if attribute.startswith("gene_type"):
_gt = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("gene_id"):
_gid = attribute.split(" ")[-1][1:-1]
elif attribute.startswith("gene_name"):
_gn = attribute.split(" ")[-1][1:-1]
exons.append((row[0], int(row[3]), int(row[4]), row[6], _gt, _gid, _gn))
self.fileHandle.close()
exons_df = pd.DataFrame(exons, columns=['scaffold', 'start', 'end',
'strand', 'gene_type', 'gene_id', 'gene_name'])
for record in self.geneExonicRegions(exons_df):
yield record
def coordinates(self, name, start=None, end=None):
"""A generator to return a bed record containing genomic coordinates for the
aligned segment
Keyword Args:
start: The alignment start position of the cDNA molecule or the relative
start of the particular molecule
end: The alignment end position in the cDNA molecule or the relative end
of the particular molecule
Args:
name: A valid miRNA_primary_transcript name
Returns:
A list of tuple(s) of strings containing elements for a bed record. There
may be more than one because of alternate splicing.
"""
if "|" in name:
self.name = name.split("|")[0]
else:
self.name = name
positions = {}
match_positions = []
records = []
segments = []
result_segments = []
for record in self.process(self.name):
records.append(record)
records.sort(key=lambda x: int(x.exon_number))
if records[0].strand == '+':
_start = 1
for record in records:
for relative, actual in enumerate(range(record.start, record.end + 1),
start=_start):
positions[relative] = actual
_start = relative + 1
for pos in range(start, end):
match_positions.append(positions[pos])
for key, group in groupby(enumerate(match_positions),
lambda x: x[0] - x[-1]):
segment = list(map(itemgetter(1), group))
segments.append([segment[0], segment[-1]])
for segment in segments:
for record in records:
if segment[0] >= record.start and segment[1] <= record.end:
result_segments.append((record.scaffold, segment[0], segment[1],
record.transcript_id + '|' + record.gene_name, 0, record.strand))
elif records[0].strand == '-':
_start = 1
for record in records:
for relative, actual in enumerate(reversed(range(record.start,
record.end + 1)), start=_start):
positions[relative] = actual
_start = relative + 1
for pos in range(start, end):
match_positions.append(positions[pos])
for key, group in groupby(enumerate(reversed(match_positions)),
lambda x: x[0] - x[-1]):
segment = list(map(itemgetter(1), group))
segments.append([segment[0], segment[-1]])
for segment in segments:
for record in records:
if segment[0] >= record.start and segment[1] <= record.end:
result_segments.append((record.scaffold, segment[0], segment[1],
record.transcript_id + '|' + record.gene_name, 0, record.strand))
if len(result_segments) == 0:
logger.debug('%s, %s, %s' % (name, start, end))
logger.debug('%s' % str(segments))
for r in records:
logger.debug('%s %s %s %s' % (r.scaffold, r.strand,
r.start, r.end))
return result_segments
class SAM:
"""SAM file parser for parsing bowtie2 generated files
This class uses memory-mapped file object to read a sam file
Attributes:
fileName: A sam file path
"""
def __init__(self, fileName=None):
self.fileName = fileName
self.records = {}
def read(self, flag=0):
"""Reads sam file provided during class initialization
Stores the byte position of every record based on the keyword arg flag
provided, to a dict object named self.records
Keyword Args:
flag: The SAM alignment flag for a record. For default, it uses the
primary alignment for every record and ignores secondary alignments
(default 0)
"""
logger.info('Reading %s' % self.fileName)
self.fileHandle = open(self.fileName, 'r+b')
bytePosition = self.fileHandle.tell()
for line in self.fileHandle:
read = line.decode('utf-8').split("\t")
if not read[0].startswith("@") and read[1] == str(flag):
self.records[read[0]] = bytePosition
bytePosition = self.fileHandle.tell()
self.fileHandle.close()
logger.debug('Reading %s finished' % self.fileName)
def access(self, queryName):
"""Provides random access of a record from the sam file
Args:
queryName: The query name of the read from the sam file
Returns:
A list generated after splitting the record line from sam file
"""
self.fileHandle = open(self.fileName, 'r+b')
self.mm = mmap.mmap(self.fileHandle.fileno(), 0)
self.mm.seek(self.records[queryName])
row = self.mm.readline().decode('utf-8').rstrip().split("\t")
self.fileHandle.close()
return self.pretty(row)
def filterPotentialChimeras(self, min_length=30, flag=0, target=None):
"""Generated a filtered fasta file from a sam file
This filtered fasta file contains reads that can be potentially chimeras.
The criteria for filtering is based on the minimum length
Keyword Args:
min_length: To be selected as a potential chimera, this is the minimum
read length (default 30)
flag: The SAM alignment flag describing the type of alignment (default 0)
target: The prefix for output file
"""
logger.debug('Filtering {} for potential chimeras'.format(target))
target = '{}.filter.fasta'.format(target.rpartition(".")[0])
if os.path.exists(target):
logger.info('Skipping filtering for {}'.format(target))
else:
with open(target, 'w') as oH:
with open(self.fileName) as iH:
for row in csv.reader(iH, delimiter="\t"):
if not row[0].startswith('@') and row[1] == str(flag):
              if len(row[9]) >= min_length:
print(textwrap.fill('>%s' % row[0], width=80), file=oH)
print(textwrap.fill('%s' % row[9], width=80), file=oH)
logger.debug('Filtering finished')
return target
def pretty(self, row):
refId = row[2]
start = int(row[3])
for i in row[10:]:
if i.startswith('MD'):
mismatchInfo = i
sequence = row[9]
cigar = row[5]
cigarString = clashchimeras.methods.convertCigar(row[5])
matchLength = cigarString.count("M") + cigarString.count("D")
end = start + matchLength - 1
record = Record(refId=refId, start=start, mismatchInfo=mismatchInfo,
sequence=sequence, cigarString=cigarString, matchLength=matchLength,
cigar=cigar, end=end)
return record
class Output:
"""Contains methods for writing output files
This class is used to generate every kind of output generated by this
package which includes plain text, ansi colored text and bed file
Attributes:
target: A prefix for output file which will be automatically followed by
extension (default 'wip')
overlap: Minimum overlap to be set between two molecules when determining
chimera (default 4)
gap: Maximum gap (number of unknown nucleotides) to be allowed between
two molecules within a chimera (default 9)
"""
def __init__(self,
target=None,
smallRNABed=False,
targetRNABed=False,
overlap=4,
gap=9):
self.target = target
self.overlap = overlap
self.gap = gap
if smallRNABed:
self.smallRNABedHandle = open('{}.smallRNA.bed'.format(self.target), 'w')
print('# BED locations of smallRNA part of the identified chimera',
file=self.smallRNABedHandle)
self.smallRNABedCSV = csv.writer(self.smallRNABedHandle, delimiter="\t")
self.smallRNABedCSV.writerow(
['# The name field represents the following:'])
self.smallRNABedCSV.writerow(
['# E.g. 201980-1-48|hsa-mir-100==PAPSS1'])
self.smallRNABedCSV.writerow(
['# 201980-1-48 is the fasta identifier'])
self.smallRNABedCSV.writerow(
["# 201980 is the unique identifier"])
self.smallRNABedCSV.writerow(
["# 1 is the number of times that sequence was observed in raw "
"fastq "])
self.smallRNABedCSV.writerow(
["# 48 is the length of the sequence"])
self.smallRNABedCSV.writerow(
['# hsa-mir-100 represents the smallRNA transcript'])
self.smallRNABedCSV.writerow(
['# PAPSS1 represents the gene symbol for targetRNA transcript '
'transcript '])
if targetRNABed:
self.targetRNABedHandle = open('{}.targetRNA.bed'.format(self.target),
'w')
self.targetRNABedCSV = csv.writer(self.targetRNABedHandle, delimiter="\t")
self.targetRNABedCSV.writerow(
['# The name field represents the following:'])
self.targetRNABedCSV.writerow(
['# E.g. 136019-1-48|ENST00000375759.6|SPEN==hsa-mir-103a-2'])
self.targetRNABedCSV.writerow(
['# 136019-1-48 is the fasta identifier'])
self.targetRNABedCSV.writerow(
["# 136019 is the unique identifier"])
self.targetRNABedCSV.writerow(
["# 1 is the number of times that sequence was observed in raw "
"fastq "])
self.targetRNABedCSV.writerow(
["# 48 is the length of the sequence"])
self.targetRNABedCSV.writerow(
["# ENST00000375759.6 is the targetRNA transcript identifier"])
self.targetRNABedCSV.writerow(
['# SPEN is the gene symbol for for targetRNA transcript '
'ENST00000375759.6'])
self.targetRNABedCSV.writerow(
['# hsa-mir-103a-2 represents the smallRNA transcript '])
self.hybWriter = open('%s.chimeras.tsv' % self.target, 'w')
self.hybComments()
def hybComments(self):
print("# fasta Identifier: The identifier in <sample>.unique.fasta. ",
"#\tE.g. 123456-3-68 ",
"#\t123456 is the unique identifier",
"#\t3 is the number of times that sequence was observed in raw "
"fastq ",
"#\t68 is the length of the sequence", sep="\n", file=self.hybWriter)
print("# smallRNA: The cDNA ID of the type of RNA labelled as smallRNA in "
"the analysis",
"#\tE.g. hsa-let-7b (miRBase identifier)",
"#\tE.g. ENST00000619178.1|SNORD3D| (Gencode snoRNA identifier)",
sep="\n", file=self.hybWriter)
print("# smallRNA_start: cDNA alignment start position of the smallRNA "
"part of the chimera", file=self.hybWriter)
print("# smallRNA_MDtag: Showing the MD tag from the smallRNA SAM "
"alignment for the chimera",
"#\tSAM file format specification",
"#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
"#\tMD Z String for mismatching positions.Regex:[0-9]+((["
"A-Z]|\^[A-Z]+)[0-9]+)*9", sep="\n", file=self.hybWriter)
print('# smallRNA_cigar: Cigar string from the smallRNA SAM alignment for '
'the chimera',
"#\tSAM file format specification",
"#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
'#\tSee CIGAR in the file', sep="\n", file=self.hybWriter)
print('# arbitrary_chimera: The chimera representation indicating what '
'part of the sequence represents smallRNA and targetRNA',
'#\t{ is representing a match with smallRNA',
'#\t} is representing a match with targetRNA',
'#\t# is representing unaligned sequences (identified as --gap -ga)',
'#\t- is representing a deletion (D in cigar string)',
          '#\t+ is representing an insertion (I in cigar string)',
          '#\tE.g {{{{{{{{-{{{{{{{{{{{{{##}}}}}}}}}}+}}}}}}}}}}}}}}}}}}}}}}',
'#\tE.g The first 22 nucleotides are aligning to smallRNA cDNA',
'#\tE.g The last 33 nucleotides are aligning to targetRNA cDNA',
sep="\n", file=self.hybWriter)
print('# read_sequence: The actual sequence that is appeared in raw '
'reads', file=self.hybWriter)
print("# targetRNA: The cDNA ID of the type of RNA labelled as targetRNA "
"in "
"the analysis",
"#\tE.g. hsa-let-7b (miRBase identifier)",
"#\tE.g. ENST00000619178.1|SNORD3D| (Gencode snoRNA identifier)",
sep="\n", file=self.hybWriter)
print("# targetRNA_start: cDNA alignment start position of the targetRNA "
"part of the chimera", file=self.hybWriter)
print("# targetRNA_MDtag: Showing the MD tag from the targetRNA SAM "
"alignment for the chimera",
"#\tSAM file format specification",
"#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
"#\tMD Z String for mismatching positions.Regex:[0-9]+((["
"A-Z]|\^[A-Z]+)[0-9]+)*9", sep="\n", file=self.hybWriter)
print('# targetRNA_cigar: Cigar string from the targetRNA SAM alignment '
'for '
'the chimera',
"#\tSAM file format specification",
"#\thttp://samtools.github.io/hts-specs/SAMv1.pdf",
'#\tSee CIGAR in the file', sep="\n", file=self.hybWriter)
print("# fasta_Identifier", "smallRNA", "smallRNA_start", "smallRNA_MDtag",
"smallRNA_cigar", "arbitrary_chimera", "read_sequence", "targetRNA",
"targetRNA_start", "targetRNA_MDtag", "targetRNA_cigar", sep="\t",
file=self.hybWriter)
def writeTargetRNABed(self, query, targetRNASegments, smallRNA):
if "ENS" in smallRNA and "|" in smallRNA:
_smallRNA = smallRNA.split("|")[5]
else:
_smallRNA = smallRNA
for segment in targetRNASegments:
_segment = list(segment)
_segment[3] = query + "|" + _segment[3] + "==" + _smallRNA
self.targetRNABedCSV.writerow(_segment)
def writeSmallRNABed(self, query, smallRNASegments, targetRNA):
if "ENS" in targetRNA and "|" in targetRNA:
_targetRNA = targetRNA.split("|")[5]
else:
_targetRNA = targetRNA
for segment in smallRNASegments:
_segment = list(segment)
_segment[3] = query + "|" + _segment[3] + "==" + _targetRNA
self.smallRNABedCSV.writerow(_segment)
def write(self, queryName, smallRNA, targetRNA):
chimeraString = clashchimeras.methods.chimeraOrNot(smallRNA.cigarString,
targetRNA.cigarString, overlap=self.overlap, gap=self.gap)
smallRNARegion = clashchimeras.methods.findRegion(smallRNA)
targetRNARegion = clashchimeras.methods.findRegion(targetRNA)
print(queryName, smallRNARegion, smallRNA.start, smallRNA.mismatchInfo,
smallRNA.cigar, chimeraString, smallRNA.sequence,
targetRNARegion, targetRNA.start,
targetRNA.mismatchInfo, targetRNA.cigar, sep="\t", file=self.hybWriter)
def __del__(self):
self.hybWriter.close()
class Fasta:
def __init__(self, genome=None, gtf=None):
self.genome = genome
self.gtf = gtf
self.faidx = pyfaidx.Fasta(self.genome)
def getBiotype(self, output=None, biotype=None):
self.sequences = []
g = GTF(fileName=self.gtf)
if biotype == 'tRNA':
g.readBiotype(biotype=biotype, featureType='tRNAscan')
else:
g.readBiotype(biotype=biotype)
for transcript_id, exons in g.biotypeFeatures.items():
temp_seq = ''
exons.sort(key=itemgetter(0))
for exon in exons:
if exon[4] == '-':
temp_seq += (-self.faidx[exon[1]][exon[2] - 1:exon[3]]).seq
elif exon[4] == '+':
temp_seq += self.faidx[exon[1]][exon[2] - 1:exon[3]].seq
_id = '{}|{}|{}|{}|{}|{}|{}'.format(transcript_id,
exons[0][5],
exons[0][6],
exons[0][7],
exons[0][8],
exons[0][9],
len(temp_seq))
temp_rec = SeqRecord(seq=Seq(temp_seq), id=_id,
description='')
self.sequences.append(temp_rec)
if not output:
logger.error('Please provide output file..')
sys.exit()
else:
logger.info('Writing {}'.format(output))
SeqIO.write(self.sequences, output, 'fasta')
class Fastq:
def __init__(self, fileName=None, compressed=False):
self.fileName = fileName
self.compressed = compressed
self.n = 4
self.sequences = Counter()
self.uniqueOutput = fileName.rpartition(".")[0] + '.unique.fasta'
def recordIterator(self):
record = []
record_length = 0
for line in self.fileHandle:
if record_length == self.n:
yield record
record_length = 0
record = []
record.append(line.decode().rstrip())
record_length += 1
yield record
def createUnique(self):
if self.compressed:
self.fileHandle = gzip.open(self.fileName, 'rb')
else:
self.fileHandle = open(self.fileName, 'rb')
logger.info('Reading {}'.format(self.fileName))
for record in self.recordIterator():
self.sequences[record[1]] += 1
logger.info('Writing {}'.format(self.uniqueOutput))
with open(self.uniqueOutput, 'w') as wH:
for index, (sequence, counts) in enumerate(sorted(self.sequences.items(),
key=itemgetter(1), reverse=True), start=1):
print('>{}-{}-{}'.format(index, counts, len(sequence)), file=wH)
print(textwrap.fill(sequence, width=80), file=wH)
logger.debug('Finished writing {}'.format(self.uniqueOutput))
self.fileHandle.close()
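# Minimal sketch of collapsing a gzipped FASTQ into unique sequences; the
# input path is a placeholder and the output lands next to the input with a
# .unique.fasta suffix.
def _example_fastq_usage():
  fastq = Fastq(fileName="sample.fastq.gz", compressed=True)
  fastq.createUnique()
  print(fastq.uniqueOutput)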
class Record:
"""A custom object (preferred over dict) for easy access using variables
It's a dependency for GTF and GFF classes
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
| mit |
FXIhub/hummingbird | scripts/masking/mask.py | 2 | 3250 | #!/usr/bin/env python
import sys,argparse
import numpy
import os
import time, datetime
import h5py
import scipy.misc
import configobj
def get_valid_stacks(f_names):
f_names_valid = []
for fn in f_names:
with h5py.File(fn,"r") as f:
if "mean" in f.keys():
f_names_valid.append(fn)
return f_names_valid
def get_dims(f_name):
with h5py.File(f_name,"r") as f:
s = numpy.shape(f["mean"])
list(s).pop(0)
return tuple(s)
def get_max_mask(f_names, ds_name, threshold):
d = []
for fn in f_names:
with h5py.File(fn, "r") as f:
d.append(numpy.array(f[ds_name]))
return (numpy.mean(d,axis=0) < threshold)
def get_min_mask(f_names, ds_name, threshold):
d = []
for fn in f_names:
with h5py.File(fn, "r") as f:
d.append(numpy.array(f[ds_name]))
return (numpy.mean(d,axis=0) > threshold)
def get_badpixelmask(f_name):
if f_name[-3:] == ".h5":
with h5py.File(f_name, "r"):
m = numpy.array(f["/data/data"])
elif f_name[-4:] == ".png":
m = scipy.misc.imread(f_name,flatten=True) / 255.
return m
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Hummingbird mask tool. Creates mask from stack files in current directory and given configuration file.')
parser.add_argument('config', type=str,
help="Configuration file")
parser.add_argument('-l', '--link', type=str, help="Creates symbolic link to the H5 mask from given path")
if(len(sys.argv) == 1):
parser.print_help()
args = parser.parse_args()
C = configobj.ConfigObj(args.config)
files = os.listdir(".")
files = [f for f in files if len(f) > 3]
files = [f for f in files if f[-3:] == ".h5"]
files = get_valid_stacks(files)
if len(files) == 0:
sys.exit(0)
s = get_dims(files[0])
mask = numpy.ones(shape=s, dtype="bool")
if C["mean_max"].lower() != 'none':
mask *= get_max_mask(files, "mean", float(C["mean_max"]))
if C["std_max"].lower() != 'none':
mask *= get_max_mask(files, "std", float(C["std_max"]))
if C["median_max"].lower() != 'none':
mask *= get_max_mask(files, "median", float(C["median_max"]))
if C["mean_min"].lower() != 'none':
mask *= get_min_mask(files, "mean", float(C["mean_min"]))
if C["std_min"].lower() != 'none':
mask *= get_min_mask(files, "std", float(C["std_min"]))
if C["median_min"].lower() != 'none':
mask *= get_min_mask(files, "median", float(C["median_min"]))
if C["badpixelmask"].lower() != 'none':
mask *= get_badpixelmask(C["badpixelmask"])
fn_root = files[-1].split("/")[-1][:-3]
outdir = C["outdir"]
os.system("mkdir -p %s" % outdir)
if bool(C["output_png"].lower()):
import matplotlib.pyplot as pypl
pypl.imsave("%s/mask_%s.png" % (outdir,fn_root), mask, cmap="binary_r", vmin=0, vmax=1)
with h5py.File("%s/mask_%s.h5" % (outdir,fn_root), "w") as f:
f["data/data"] = mask
os.system("cp %s %s/mask_%s.conf" % (args.config,outdir,fn_root))
if args.link:
os.system("ln -s -f %s/mask_%s.h5 %s" % (outdir, fn_root, args.link))
| bsd-2-clause |
mjgrav2001/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
ajenhl/tacl | tacl/data_store.py | 1 | 35278 | """Module containing the DataStore class."""
import csv
import logging
import os.path
import sqlite3
import sys
import tempfile
import pandas as pd
from . import constants
from .exceptions import (MalformedDataStoreError, MalformedQueryError,
MalformedResultsError)
from .text import WitnessText
class DataStore:
"""Class representing the data store for text data.
It provides an interface to the underlying database, with methods
to add and query data.
"""
def __init__(self, db_name, use_memory=True, ram=0, must_exist=True):
self._logger = logging.getLogger(__name__)
if db_name == ':memory:':
self._db_name = db_name
else:
self._db_name = os.path.abspath(db_name)
if must_exist and not os.path.exists(self._db_name):
raise MalformedDataStoreError(
constants.MISSING_DATA_STORE_ERROR.format(self._db_name))
self._conn = sqlite3.connect(self._db_name)
self._conn.row_factory = sqlite3.Row
if use_memory:
self._conn.execute(constants.PRAGMA_TEMP_STORE_SQL)
if ram:
cache_size = ram * -1000000
self._conn.execute(constants.PRAGMA_CACHE_SIZE_SQL.format(
cache_size))
self._conn.execute(constants.PRAGMA_COUNT_CHANGES_SQL)
self._conn.execute(constants.PRAGMA_FOREIGN_KEYS_SQL)
self._conn.execute(constants.PRAGMA_LOCKING_MODE_SQL)
self._conn.execute(constants.PRAGMA_SYNCHRONOUS_SQL)
def _add_indices(self):
"""Adds the database indices relating to n-grams."""
self._logger.info('Adding database indices')
self._conn.execute(constants.CREATE_INDEX_TEXTNGRAM_SQL)
self._logger.info('Indices added')
def add_ngrams(self, corpus, minimum, maximum, catalogue=None,
text_class=WitnessText):
"""Adds n-gram data from `corpus` to the data store.
:param corpus: corpus of works
:type corpus: `Corpus`
:param minimum: minimum n-gram size
:type minimum: `int`
:param maximum: maximum n-gram size
:type maximum: `int`
:param catalogue: optional catalogue to limit corpus to
:type catalogue: `Catalogue`
:param text_class: class to use to represent each witness
:type text_class: subclass of `Text`
"""
if not isinstance(minimum, int) or not isinstance(maximum, int):
raise MalformedQueryError(
constants.NGRAM_SIZE_MUST_BE_INTEGER_ERROR)
if minimum < 1:
raise MalformedQueryError(constants.NGRAM_SIZE_TOO_SMALL_ERROR)
if minimum > maximum:
raise MalformedQueryError(
constants.NGRAM_MINIMUM_SIZE_GREATER_THAN_MAXIMUM_ERROR)
self._initialise_database()
if catalogue:
for work in catalogue:
db_witnesses = self._get_text_ids(work)
for witness in corpus.get_witnesses(
work, text_class=text_class):
text_id = self._add_text_ngrams(witness, minimum, maximum)
db_witnesses.pop(text_id, None)
                for text_id, names in db_witnesses.items():
self._delete_text(text_id, *names)
else:
db_witnesses = self._get_text_ids()
for witness in corpus.get_witnesses(text_class=text_class):
text_id = self._add_text_ngrams(witness, minimum, maximum)
db_witnesses.pop(text_id, None)
for text_id, names in db_witnesses.items():
self._delete_text(text_id, *names)
self._add_indices()
self._analyse()
def _add_temporary_ngrams(self, ngrams):
"""Adds `ngrams` to a temporary table."""
# Remove duplicate n-grams, empty n-grams, and non-string n-grams.
ngrams = [ngram for ngram in ngrams if ngram and isinstance(
ngram, str)]
# Deduplicate while preserving order (useful for testing).
seen = {}
ngrams = [seen.setdefault(x, x) for x in ngrams if x not in seen]
self._conn.execute(constants.DROP_TEMPORARY_NGRAMS_TABLE_SQL)
self._conn.execute(constants.CREATE_TEMPORARY_NGRAMS_TABLE_SQL)
self._conn.executemany(constants.INSERT_TEMPORARY_NGRAM_SQL,
[(ngram,) for ngram in ngrams])
def _add_temporary_results_sets(self, results_filenames, labels):
if len(labels) < 2:
raise MalformedQueryError(
constants.INSUFFICIENT_LABELS_QUERY_ERROR)
if len(results_filenames) != len(labels):
raise MalformedQueryError(
constants.SUPPLIED_ARGS_LENGTH_MISMATCH_ERROR)
self._create_temporary_results_table()
for results_filename, label in zip(results_filenames, labels):
with open(results_filename, encoding='utf-8', newline='') as fh:
self._add_temporary_results(fh, label)
self._add_temporary_results_index()
self._analyse('temp.InputResults')
def _add_temporary_results(self, results, label):
"""Adds `results` to a temporary table with `label`.
:param results: results file
:type results: `File`
:param label: label to be associated with results
:type label: `str`
"""
NGRAM, SIZE, NAME, SIGLUM, COUNT, LABEL = constants.QUERY_FIELDNAMES
reader = csv.DictReader(results)
try:
data = [(row[NGRAM], row[SIZE], row[NAME], row[SIGLUM], row[COUNT],
label) for row in reader]
except KeyError:
missing_cols = [col for col in constants.QUERY_FIELDNAMES if col
not in reader.fieldnames]
raise MalformedResultsError(
constants.MISSING_REQUIRED_COLUMNS_ERROR.format(
', '.join(missing_cols)))
self._conn.executemany(constants.INSERT_TEMPORARY_RESULTS_SQL, data)
def _add_temporary_results_index(self):
self._logger.info('Adding index to temporary results table')
self._conn.execute(constants.CREATE_INDEX_INPUT_RESULTS_SQL)
self._logger.info('Index added')
def _add_text_ngrams(self, witness, minimum, maximum):
"""Adds n-gram data from `witness` to the data store.
:param witness: witness to get n-grams from
:type witness: `WitnessText`
:param minimum: minimum n-gram size
:type minimum: `int`
:param maximum: maximum n-gram size
:type maximum: `int`
:rtype: `int`
"""
text_id = self._get_text_id(witness)
self._logger.info('Adding n-grams ({} <= n <= {}) for {}'.format(
minimum, maximum, witness.get_filename()))
skip_sizes = []
for size in range(minimum, maximum + 1):
if self._has_ngrams(text_id, size):
self._logger.info(
'{}-grams are already in the database'.format(size))
skip_sizes.append(size)
for size, ngrams in witness.get_ngrams(minimum, maximum, skip_sizes):
self._add_text_size_ngrams(text_id, size, ngrams)
return text_id
def _add_text_record(self, witness):
"""Adds a Text record for `witness`.
:param witness: witness to add a record for
        :type witness: `WitnessText`
"""
filename = witness.get_filename()
self._logger.info('Adding record for text {}'.format(filename))
checksum = witness.get_checksum()
token_count = len(witness.get_tokens())
with self._conn:
cursor = self._conn.execute(
constants.INSERT_TEXT_SQL,
[witness.work, witness.siglum, checksum, token_count, ''])
return cursor.lastrowid
def _add_text_size_ngrams(self, text_id, size, ngrams):
"""Adds `ngrams`, that are of size `size`, to the data store.
The added `ngrams` are associated with `text_id`.
:param text_id: database ID of text associated with `ngrams`
:type text_id: `int`
:param size: size of n-grams
:type size: `int`
:param ngrams: n-grams to be added
:type ngrams: `collections.Counter`
"""
unique_ngrams = len(ngrams)
self._logger.info('Adding {} unique {}-grams'.format(
unique_ngrams, size))
parameters = [[text_id, ngram, size, count]
for ngram, count in ngrams.items()]
with self._conn:
self._conn.execute(constants.INSERT_TEXT_HAS_NGRAM_SQL,
[text_id, size, unique_ngrams])
self._conn.executemany(constants.INSERT_NGRAM_SQL, parameters)
def _analyse(self, table=''):
"""Analyses the database, or `table` if it is supplied.
:param table: optional name of table to analyse
:type table: `str`
"""
self._logger.info('Starting analysis of database')
self._conn.execute(constants.ANALYSE_SQL.format(table))
self._logger.info('Analysis of database complete')
@staticmethod
def _check_diff_result(row, matches, tokenize, join):
"""Returns `row`, possibly with its count changed to 0, depending on
the status of the n-grams that compose it.
The n-gram represented in `row` can be decomposed into two
(n-1)-grams. If neither sub-n-gram is present in `matches`, do
not change the count since this is a new difference.
If both sub-n-grams are present with a positive count, do not
change the count as it is composed entirely of sub-ngrams and
therefore not filler.
Otherwise, change the count to 0 as the n-gram is filler.
:param row: result row of the n-gram to check
:type row: pandas.Series
:param matches: (n-1)-grams and their associated counts to check
against
:type matches: `dict`
:param tokenize: function to tokenize a string
:type tokenize: `function`
:param join: function to join tokens into a string
:type join: `function`
:rtype: pandas.Series
"""
ngram_tokens = tokenize(row[constants.NGRAM_FIELDNAME])
sub_ngram1 = join(ngram_tokens[:-1])
sub_ngram2 = join(ngram_tokens[1:])
count = constants.COUNT_FIELDNAME
discard = False
# For performance reasons, avoid searching through matches
# unless necessary.
status1 = matches.get(sub_ngram1)
if status1 == 0:
discard = True
else:
status2 = matches.get(sub_ngram2)
if status2 == 0:
discard = True
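            # Exactly one of the two sub-(n-1)-grams is absent from `matches`:
            # the n-gram exists only because of a single extra token, so it is
            # treated as filler.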
elif (status1 is None) ^ (status2 is None):
discard = True
if discard:
row[count] = 0
return row
def counts(self, catalogue, output_fh):
"""Returns `output_fh` populated with CSV results giving
n-gram counts of the witnesses of the works in `catalogue`.
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:param output_fh: object to output results to
:type output_fh: file-like object
:rtype: file-like object
"""
labels = list(self._set_labels(catalogue))
label_placeholders = self._get_placeholders(labels)
query = constants.SELECT_COUNTS_SQL.format(label_placeholders)
self._logger.info('Running counts query')
self._logger.debug('Query: {}\nLabels: {}'.format(query, labels))
cursor = self._conn.execute(query, labels)
return self._csv(cursor, constants.COUNTS_FIELDNAMES, output_fh)
def _create_temporary_results_table(self):
self._conn.execute(constants.DROP_TEMPORARY_RESULTS_TABLE_SQL)
self._conn.execute(constants.CREATE_TEMPORARY_RESULTS_TABLE_SQL)
def _csv(self, cursor, fieldnames, output_fh):
"""Writes the rows of `cursor` in CSV format to `output_fh`
and returns it.
:param cursor: database cursor containing data to be output
:type cursor: `sqlite3.Cursor`
:param fieldnames: row headings
:type fieldnames: `list`
:param output_fh: file to write data to
:type output_fh: file object
:rtype: file object
"""
self._logger.info('Finished query; outputting results in CSV format')
# Specify a lineterminator to avoid an extra \r being added on
# Windows; see
# https://stackoverflow.com/questions/3191528/csv-in-python-adding-extra-carriage-return
if sys.platform in ('win32', 'cygwin') and output_fh is sys.stdout:
writer = csv.writer(output_fh, lineterminator='\n')
else:
writer = csv.writer(output_fh)
writer.writerow(fieldnames)
for row in cursor:
writer.writerow(row)
self._logger.info('Finished outputting results')
return output_fh
def _csv_temp(self, cursor, fieldnames):
"""Writes the rows of `cursor` in CSV format to a temporary file and
returns the path to that file.
:param cursor: database cursor containing data to be output
:type cursor: `sqlite3.Cursor`
:param fieldnames: row headings
:type fieldnames: `list`
:rtype: `str`
"""
temp_fd, temp_path = tempfile.mkstemp(text=True)
with open(temp_fd, 'w', encoding='utf-8', newline='') as results_fh:
self._csv(cursor, fieldnames, results_fh)
return temp_path
def _delete_text(self, text_id, work, siglum):
"""Deletes the text identified by `text_id` from the database.
:param text_id: database ID of text
:type text_id: `int`
:param work: name of text's work
:type work: `str`
:param siglum: text's siglum
:type siglum: `str`
"""
self._logger.info('Deleting text {} {} from database'.format(
work, siglum))
with self._conn:
self._conn.execute(constants.DELETE_TEXT_SQL, [text_id])
def _delete_text_ngrams(self, text_id):
"""Deletes all n-grams associated with `text_id` from the data
store.
:param text_id: database ID of text
:type text_id: `int`
"""
with self._conn:
self._conn.execute(constants.DELETE_TEXT_NGRAMS_SQL, [text_id])
self._conn.execute(constants.DELETE_TEXT_HAS_NGRAMS_SQL, [text_id])
def _diff(self, cursor, tokenizer, output_fh):
"""Returns output_fh with diff results that have been reduced.
Uses a temporary file to store the results from `cursor`
before being reduced, in order to not have the results stored
in memory twice.
:param cursor: database cursor containing raw diff data
:type cursor: `sqlite3.Cursor`
:param tokenizer: tokenizer for the n-grams
:type tokenizer: `Tokenizer`
:type output_fh: file-like object
:rtype: file-like object
"""
temp_path = self._csv_temp(cursor, constants.QUERY_FIELDNAMES)
output_fh = self._reduce_diff_results(temp_path, tokenizer, output_fh)
try:
os.remove(temp_path)
except OSError as e:
self._logger.error('Failed to remove temporary file containing '
'unreduced results: {}'.format(e))
return output_fh
def diff(self, catalogue, tokenizer, output_fh):
"""Returns `output_fh` populated with CSV results giving the n-grams
that are unique to the witnesses of each labelled set of works
in `catalogue`.
Note that this is not the same as the symmetric difference of
these sets, except in the case where there are only two
labels.
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:param tokenizer: tokenizer for the n-grams
:type tokenizer: `Tokenizer`
:param output_fh: object to output results to
:type output_fh: file-like object
:rtype: file-like object
"""
labels = self._sort_labels(self._set_labels(catalogue))
if len(labels) < 2:
raise MalformedQueryError(
constants.INSUFFICIENT_LABELS_QUERY_ERROR)
label_placeholders = self._get_placeholders(labels)
query = constants.SELECT_DIFF_SQL.format(label_placeholders,
label_placeholders)
parameters = labels + labels
self._logger.info('Running diff query')
self._logger.debug('Query: {}\nLabels: {}'.format(query, labels))
self._log_query_plan(query, parameters)
cursor = self._conn.execute(query, parameters)
return self._diff(cursor, tokenizer, output_fh)
def diff_asymmetric(self, catalogue, prime_label, tokenizer, output_fh):
"""Returns `output_fh` populated with CSV results giving the
difference in n-grams between the witnesses of labelled sets
of works in `catalogue`, limited to those works labelled with
`prime_label`.
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:param prime_label: label to limit results to
:type prime_label: `str`
:param tokenizer: tokenizer for the n-grams
:type tokenizer: `Tokenizer`
:param output_fh: object to output results to
:type output_fh: file-like object
:rtype: file-like object
"""
labels = list(self._set_labels(catalogue))
if len(labels) < 2:
raise MalformedQueryError(
constants.INSUFFICIENT_LABELS_QUERY_ERROR)
try:
labels.remove(prime_label)
except ValueError:
raise MalformedQueryError(constants.LABEL_NOT_IN_CATALOGUE_ERROR)
label_placeholders = self._get_placeholders(labels)
query = constants.SELECT_DIFF_ASYMMETRIC_SQL.format(label_placeholders)
parameters = [prime_label, prime_label] + labels
self._logger.info('Running asymmetric diff query')
self._logger.debug('Query: {}\nLabels: {}\nPrime label: {}'.format(
query, labels, prime_label))
self._log_query_plan(query, parameters)
cursor = self._conn.execute(query, parameters)
return self._diff(cursor, tokenizer, output_fh)
def diff_supplied(self, results_filenames, labels, tokenizer, output_fh):
"""Returns `output_fh` populated with CSV results giving the n-grams
that are unique to the witnesses in each set of works in
`results_sets`, using the labels in `labels`.
Note that this is not the same as the symmetric difference of
these sets, except in the case where there are only two
labels.
:param results_filenames: list of results filenames to be diffed
:type results_filenames: `list` of `str`
:param labels: labels to be applied to the results_sets
:type labels: `list`
:param tokenizer: tokenizer for the n-grams
:type tokenizer: `Tokenizer`
:param output_fh: object to output results to
:type output_fh: file-like object
:rtype: file-like object
"""
self._add_temporary_results_sets(results_filenames, labels)
query = constants.SELECT_DIFF_SUPPLIED_SQL
self._logger.info('Running supplied diff query')
self._logger.debug('Query: {}'.format(query))
self._log_query_plan(query, [])
cursor = self._conn.execute(query)
return self._diff(cursor, tokenizer, output_fh)
def _drop_indices(self):
"""Drops the database indices relating to n-grams."""
self._logger.info('Dropping database indices')
self._conn.execute(constants.DROP_TEXTNGRAM_INDEX_SQL)
self._logger.info('Finished dropping database indices')
def _get_checksum(self, text_id):
"""Returns the checksum for the text with `text_id`."""
@staticmethod
def _get_intersection_subquery(labels):
# Create nested subselects.
subquery = constants.SELECT_INTERSECT_SUB_SQL
# The subqueries are nested in reverse order of 'size', so
# that the inmost select is operating on the smallest corpus,
# thereby minimising the result sets of outer queries the most.
for label in labels[1:]:
subquery = constants.SELECT_INTERSECT_SUB_SQL + \
constants.SELECT_INTERSECT_SUB_EXTRA_SQL.format(
subquery)
return subquery
@staticmethod
def _get_placeholders(items):
"""Returns a string of placeholders, one for each item in
`items`.
:param items: items to create placeholders for
:type items: `list`
:rtype: `str`
"""
return ('?,' * len(items)).strip(',')
def _get_text_id(self, witness):
"""Returns the database ID of the Text record for `witness`.
This may require creating such a record.
        If `witness`'s checksum does not match an existing record's
checksum, the record's checksum is updated and all associated
TextNGram and TextHasNGram records are deleted.
:param witness: witness to add a record for
:type witness: `WitnessText`
:rtype: `int`
"""
text_record = self._conn.execute(
constants.SELECT_TEXT_SQL,
[witness.work, witness.siglum]).fetchone()
if text_record is None:
text_id = self._add_text_record(witness)
else:
text_id = text_record['id']
if text_record['checksum'] != witness.get_checksum():
filename = witness.get_filename()
self._logger.info('Text {} has changed since it was added to '
'the database'.format(filename))
self._update_text_record(witness, text_id)
self._logger.info('Deleting potentially out-of-date n-grams')
self._delete_text_ngrams(text_id)
return text_id
def _get_text_ids(self, work=None):
"""Returns a dictionary of IDs of texts in the database, with work and
siglum as values for each.
If `work` is supplied, returns IDs of texts that are
associated with the specified work.
:param work: optional name of work to limit texts to
:type work: `str`
:rtype: `dict`
"""
if work is None:
query = constants.SELECT_TEXTS_SQL
rows = self._conn.execute(query).fetchall()
else:
query = constants.SELECT_WORK_TEXTS_SQL
rows = self._conn.execute(query, [work]).fetchall()
return {row['id']: [row['work'], row['siglum']] for row in rows}
def _has_ngrams(self, text_id, size):
"""Returns True if a text has existing records for n-grams of
size `size`.
:param text_id: database ID of text to check
:type text_id: `int`
:param size: size of n-grams
:type size: `int`
:rtype: `bool`
"""
if self._conn.execute(constants.SELECT_HAS_NGRAMS_SQL,
[text_id, size]).fetchone() is None:
return False
return True
def _initialise_database(self):
"""Creates the database schema.
This will not create tables or indices that already exist and
is safe to be called on an existing database.
"""
self._logger.info('Creating database schema, if necessary')
self._conn.execute(constants.CREATE_TABLE_TEXT_SQL)
self._conn.execute(constants.CREATE_TABLE_TEXTNGRAM_SQL)
self._conn.execute(constants.CREATE_TABLE_TEXTHASNGRAM_SQL)
self._conn.execute(constants.CREATE_INDEX_TEXTHASNGRAM_SQL)
self._conn.execute(constants.CREATE_INDEX_TEXT_SQL)
def intersection(self, catalogue, output_fh):
"""Returns `output_fh` populated with CSV results giving the
intersection in n-grams of the witnesses of labelled sets of
works in `catalogue`.
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:param output_fh: object to output results to
:type output_fh: file-like object
:rtype: file-like object
"""
labels = self._sort_labels(self._set_labels(catalogue))
if len(labels) < 2:
raise MalformedQueryError(
constants.INSUFFICIENT_LABELS_QUERY_ERROR)
label_placeholders = self._get_placeholders(labels)
subquery = self._get_intersection_subquery(labels)
query = constants.SELECT_INTERSECT_SQL.format(label_placeholders,
subquery)
parameters = labels + labels
self._logger.info('Running intersection query')
self._logger.debug('Query: {}\nLabels: {}'.format(query, labels))
self._log_query_plan(query, parameters)
cursor = self._conn.execute(query, parameters)
return self._csv(cursor, constants.QUERY_FIELDNAMES, output_fh)
def intersection_supplied(self, results_filenames, labels, output_fh):
"""Returns `output_fh` populated with CSV results giving the n-grams
that are common to witnesses in every set of works in
`results_sets`, using the labels in `labels`.
        :param results_filenames: list of results filenames to be intersected
:type results_filenames: `list` of `str`
:param labels: labels to be applied to the results_sets
:type labels: `list`
:param output_fh: object to output results to
:type output_fh: file-like object
:rtype: file-like object
"""
self._add_temporary_results_sets(results_filenames, labels)
query = constants.SELECT_INTERSECT_SUPPLIED_SQL
parameters = [len(labels)]
self._logger.info('Running supplied intersect query')
self._logger.debug('Query: {}\nNumber of labels: {}'.format(
query, parameters[0]))
self._log_query_plan(query, parameters)
cursor = self._conn.execute(query, parameters)
return self._csv(cursor, constants.QUERY_FIELDNAMES, output_fh)
def _log_query_plan(self, query, parameters):
cursor = self._conn.execute('EXPLAIN QUERY PLAN ' + query, parameters)
query_plan = 'Query plan:\n'
for row in cursor.fetchall():
query_plan += '|'.join([str(value) for value in row]) + '\n'
self._logger.debug(query_plan)
def _reduce_diff_results(self, matches_path, tokenizer, output_fh):
"""Returns `output_fh` populated with a reduced set of data from
`matches_fh`.
Diff results typically contain a lot of filler results that
serve only to hide real differences. If one text has a single
extra token than another, the diff between them will have
results for every n-gram containing that extra token, which is
not helpful. This method removes these filler results by
'reducing down' the results.
:param matches_path: filepath or buffer of CSV results to be reduced
:type matches_path: `str` or file-like object
:param tokenizer: tokenizer for the n-grams
:type tokenizer: `Tokenizer`
:param output_fh: object to write results to
:type output_fh: file-like object
:rtype: file-like object
"""
self._logger.info('Removing filler results')
# For performance, perform the attribute accesses once.
tokenize = tokenizer.tokenize
join = tokenizer.joiner.join
results = []
previous_witness = (None, None)
previous_data = {}
# Calculate the index of ngram and count columns in a Pandas
# named tuple row, as used below. The +1 is due to the tuple
# having the row index as the first element.
ngram_index = constants.QUERY_FIELDNAMES.index(
constants.NGRAM_FIELDNAME) + 1
count_index = constants.QUERY_FIELDNAMES.index(
constants.COUNT_FIELDNAME) + 1
# Operate over individual witnesses and sizes, so that there
# is no possible results pollution between them.
grouped = pd.read_csv(matches_path, encoding='utf-8',
na_filter=False).groupby(
[constants.WORK_FIELDNAME, constants.SIGLUM_FIELDNAME,
constants.SIZE_FIELDNAME])
for (work, siglum, size), group in grouped:
if (work, siglum) != previous_witness:
previous_matches = group
previous_witness = (work, siglum)
else:
self._logger.debug(
'Reducing down {} {}-grams for {} {}'.format(
len(group.index), size, work, siglum))
if previous_matches.empty:
reduced_count = 0
else:
previous_matches = group.apply(
self._check_diff_result, axis=1,
args=(previous_data, tokenize, join))
reduced_count = len(previous_matches[previous_matches[
constants.COUNT_FIELDNAME] != 0].index)
self._logger.debug('Reduced down to {} grams'.format(
reduced_count))
# Put the previous matches into a form that is more
# performant for the lookups made in _check_diff_result.
previous_data = {}
for row in previous_matches.itertuples():
previous_data[row[ngram_index]] = row[count_index]
if not previous_matches.empty:
results.append(previous_matches[previous_matches[
constants.COUNT_FIELDNAME] != 0])
reduced_results = pd.concat(results, ignore_index=True).reindex(
columns=constants.QUERY_FIELDNAMES)
reduced_results.to_csv(output_fh, encoding='utf-8', float_format='%d',
index=False)
return output_fh
def search(self, catalogue, ngrams, output_fh):
"""Returns `output_fh` populated with CSV results for each n-gram in
`ngrams` that occurs within labelled witnesses in `catalogue`.
If `ngrams` is empty, include all n-grams.
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:param ngrams: n-grams to search for
:type ngrams: `list` of `str`
:param output_fh: object to write results to
:type output_fh: file-like object
:rtype: file-like object
"""
labels = list(self._set_labels(catalogue))
label_placeholders = self._get_placeholders(labels)
if ngrams:
self._add_temporary_ngrams(ngrams)
query = constants.SELECT_SEARCH_SQL.format(label_placeholders)
else:
query = constants.SELECT_SEARCH_ALL_SQL.format(label_placeholders)
self._logger.info('Running search query')
self._logger.debug('Query: {}\nN-grams: {}'.format(
query, ', '.join(ngrams)))
self._log_query_plan(query, labels)
cursor = self._conn.execute(query, labels)
return self._csv(cursor, constants.QUERY_FIELDNAMES, output_fh)
def _set_labels(self, catalogue):
"""Returns a dictionary of the unique labels in `catalogue` and the
count of all tokens associated with each, and sets the record
of each Text to its corresponding label.
Texts that do not have a label specified are set to the empty
string.
Token counts are included in the results to allow for
semi-accurate sorting based on corpora size.
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:rtype: `dict`
"""
with self._conn:
self._conn.execute(constants.UPDATE_LABELS_SQL, [''])
labels = {}
for work, label in catalogue.items():
self._conn.execute(constants.UPDATE_LABEL_SQL, [label, work])
cursor = self._conn.execute(
constants.SELECT_TEXT_TOKEN_COUNT_SQL, [work])
token_count = cursor.fetchone()['token_count']
labels[label] = labels.get(label, 0) + token_count
return labels
@staticmethod
def _sort_labels(label_data):
"""Returns the labels in `label_data` sorted in descending order
according to the 'size' (total token count) of their referent
corpora.
:param label_data: labels (with their token counts) to sort
:type: `dict`
:rtype: `list`
"""
labels = list(label_data)
labels.sort(key=label_data.get, reverse=True)
return labels
def _update_text_record(self, witness, text_id):
"""Updates the record with `text_id` with `witness`\'s checksum and
token count.
        :param witness: witness to update from
:type witness: `WitnessText`
:param text_id: database ID of Text record
:type text_id: `int`
"""
checksum = witness.get_checksum()
token_count = len(witness.get_tokens())
with self._conn:
self._conn.execute(constants.UPDATE_TEXT_SQL,
[checksum, token_count, text_id])
def validate(self, corpus, catalogue):
"""Returns True if all of the files labelled in `catalogue`
are up-to-date in the database.
:param corpus: corpus of works
:type corpus: `Corpus`
:param catalogue: catalogue matching filenames to labels
:type catalogue: `Catalogue`
:rtype: `bool`
"""
is_valid = True
for name in catalogue:
count = 0
# It is unfortunate that this creates WitnessText objects
# for each work, since that involves reading the file.
for witness in corpus.get_witnesses(name):
count += 1
filename = witness.get_filename()
row = self._conn.execute(
constants.SELECT_TEXT_SQL,
[witness.work, witness.siglum]).fetchone()
if row is None:
is_valid = False
self._logger.warning(
'No record (or n-grams) exists for {} in '
'the database'.format(filename))
elif row['checksum'] != witness.get_checksum():
is_valid = False
self._logger.warning(
'{} has changed since its n-grams were '
'added to the database'.format(filename))
if count == 0:
raise FileNotFoundError(
constants.CATALOGUE_WORK_NOT_IN_CORPUS_ERROR.format(
name))
return is_valid
| gpl-3.0 |
tomolaf/trading-with-python | lib/qtpandas.py | 77 | 7937 | '''
Easy integration of DataFrame into pyqt framework
Copyright: Jev Kuznetsov
Licence: BSD
'''
from PyQt4.QtCore import (QAbstractTableModel,Qt,QVariant,QModelIndex,SIGNAL)
from PyQt4.QtGui import (QApplication,QDialog,QVBoxLayout, QHBoxLayout, QTableView, QPushButton,
QWidget,QTableWidget, QHeaderView, QFont,QMenu,QAbstractItemView)
import numpy as np
from pandas import DataFrame, Index
class DataFrameModel(QAbstractTableModel):
''' data model for a DataFrame class '''
def __init__(self,parent=None):
super(DataFrameModel,self).__init__(parent)
self.df = DataFrame()
self.columnFormat = {} # format columns
def setFormat(self,fmt):
"""
set string formatting for the output
example : format = {'close':"%.2f"}
"""
self.columnFormat = fmt
def setDataFrame(self,dataFrame):
self.df = dataFrame
self.signalUpdate()
def signalUpdate(self):
''' tell viewers to update their data (this is full update, not efficient)'''
self.layoutChanged.emit()
def __repr__(self):
return str(self.df)
def setData(self,index,value, role=Qt.EditRole):
if index.isValid():
row,column = index.row(), index.column()
dtype = self.df.dtypes.tolist()[column] # get column dtype
if np.issubdtype(dtype,np.float):
val,ok = value.toFloat()
elif np.issubdtype(dtype,np.int):
val,ok = value.toInt()
else:
val = value.toString()
ok = True
if ok:
self.df.iloc[row,column] = val
return True
return False
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
return Qt.ItemFlags(
QAbstractTableModel.flags(self, index)|
Qt.ItemIsEditable)
def appendRow(self, index, data=0):
self.df.loc[index,:] = data
self.signalUpdate()
def deleteRow(self, index):
idx = self.df.index[index]
#self.beginRemoveRows(QModelIndex(), index,index)
#self.df = self.df.drop(idx,axis=0)
#self.endRemoveRows()
#self.signalUpdate()
#------------- table display functions -----------------
def headerData(self,section,orientation,role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
try:
return self.df.columns.tolist()[section]
except (IndexError, ):
return QVariant()
elif orientation == Qt.Vertical:
try:
#return self.df.index.tolist()
return str(self.df.index.tolist()[section])
except (IndexError, ):
return QVariant()
def data(self, index, role=Qt.DisplayRole):
if role != Qt.DisplayRole:
return QVariant()
if not index.isValid():
return QVariant()
col = self.df.ix[:,index.column()] # get a column slice first to get the right data type
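        # (.ix is deprecated in later pandas releases; .iloc[:, index.column()]
        # is the closest modern equivalent of this positional slice.)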
elm = col[index.row()]
#elm = self.df.ix[index.row(),index.column()]
if self.df.columns[index.column()] in self.columnFormat.keys():
return QVariant(self.columnFormat[self.df.columns[index.column()]] % elm )
else:
return QVariant(str(elm))
def sort(self,nCol,order):
self.layoutAboutToBeChanged.emit()
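        # DataFrame.sort(columns=...) is the old pandas API; newer pandas
        # versions use sort_values(by=..., ascending=...) instead.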
if order == Qt.AscendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=True)
elif order == Qt.DescendingOrder:
self.df = self.df.sort(columns=self.df.columns[nCol], ascending=False)
self.layoutChanged.emit()
def rowCount(self, index=QModelIndex()):
return self.df.shape[0]
def columnCount(self, index=QModelIndex()):
return self.df.shape[1]
class TableView(QTableView):
""" extended table view """
def __init__(self,name='TableView1', parent=None):
super(TableView,self).__init__(parent)
self.name = name
self.setSelectionBehavior(QAbstractItemView.SelectRows)
def contextMenuEvent(self, event):
menu = QMenu(self)
Action = menu.addAction("delete row")
Action.triggered.connect(self.deleteRow)
menu.exec_(event.globalPos())
def deleteRow(self):
print "Action triggered from " + self.name
print 'Selected rows:'
for idx in self.selectionModel().selectedRows():
print idx.row()
# self.model.deleteRow(idx.row())
class DataFrameWidget(QWidget):
''' a simple widget for using DataFrames in a gui '''
def __init__(self,name='DataFrameTable1', parent=None):
super(DataFrameWidget,self).__init__(parent)
self.name = name
self.dataModel = DataFrameModel()
self.dataModel.setDataFrame(DataFrame())
self.dataTable = QTableView()
#self.dataTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.dataTable.setSortingEnabled(True)
self.dataTable.setModel(self.dataModel)
self.dataModel.signalUpdate()
#self.dataTable.setFont(QFont("Courier New", 8))
layout = QVBoxLayout()
layout.addWidget(self.dataTable)
self.setLayout(layout)
def setFormat(self,fmt):
""" set non-default string formatting for a column """
for colName, f in fmt.iteritems():
self.dataModel.columnFormat[colName]=f
def fitColumns(self):
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
def setDataFrame(self,df):
self.dataModel.setDataFrame(df)
def resizeColumnsToContents(self):
self.dataTable.resizeColumnsToContents()
def insertRow(self,index, data=None):
self.dataModel.appendRow(index,data)
#-----------------stand alone test code
def testDf():
''' creates test dataframe '''
data = {'int':[1,2,3],'float':[1./3,2.5,3.5],'string':['a','b','c'],'nan':[np.nan,np.nan,np.nan]}
return DataFrame(data, index=Index(['AAA','BBB','CCC']))[['int','float','string','nan']]
class Form(QDialog):
def __init__(self,parent=None):
super(Form,self).__init__(parent)
df = testDf() # make up some data
self.table = DataFrameWidget(parent=self)
self.table.setDataFrame(df)
#self.table.resizeColumnsToContents()
self.table.fitColumns()
self.table.setFormat({'float': '%.2f'})
#buttons
#but_add = QPushButton('Add')
but_test = QPushButton('Test')
but_test.clicked.connect(self.testFcn)
hbox = QHBoxLayout()
#hbox.addself.table(but_add)
hbox.addWidget(but_test)
layout = QVBoxLayout()
layout.addWidget(self.table)
layout.addLayout(hbox)
self.setLayout(layout)
def testFcn(self):
print 'test function'
self.table.insertRow('foo')
if __name__=='__main__':
import sys
import numpy as np
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
| bsd-3-clause |
InnovArul/codesmart | InnovaProjects/public/IITM-CS-Faculty/get_summary.py | 1 | 9021 | import os, sys
import numpy as np
import pandas as pd
import xlwt
data_root = './data'
# csv file contents format
#Index(['Sno', 'Slot', 'Additional Slot', 'Course No', 'Course Name',
# 'Instructor Name', 'Old Credit', 'New Credit', 'Room', 'Prereq',
# 'Coordinator', 'CC Chairperson', 'Strength', 'Allocation Type'],
# dtype='object')
project_font = xlwt.easyfont('colour light_blue, bold True')
core_font = xlwt.easyfont('colour red, bold True')
elective_font = xlwt.easyfont('colour green, bold True')
bg_style = xlwt.easyxf('pattern: pattern solid, fore_colour white;')
def read_csv_files():
contents = {}
# for each file in the folder, read the contents
for filename_with_ext in sorted(os.listdir(data_root)):
full_path = os.path.join(data_root, filename_with_ext)
filename = os.path.splitext(filename_with_ext)[0]
contents[filename] = pd.read_csv(full_path, delimiter=',')
return contents
def text_normalization(text):
return text.strip().upper().replace("PROF. ", "")
def collect_faculty_names(contents):
# for each semester, go through the details and collect unique faculty names
faculties = []
for semester, details in contents.items():
for faculty_name in details['Instructor Name']:
faculty_norm_name = text_normalization(faculty_name)
if faculty_norm_name not in faculties:
faculties.append(faculty_norm_name)
return sorted(faculties)
def get_type_of_course(course_id, course_name):
course_name_lower = course_name.lower()
# determine type of course (CORE, ELECTIVE)
if('project' in course_name_lower
or 'seminar' in course_name_lower):
course_type = 'PROJECT'
elif(course_id.startswith('CS1')
or course_id.startswith('CS2')
or course_id.startswith('CS3')):
course_type = 'CORE'
else:
course_type = 'ELECTIVE'
return course_type
def get_course_ids_names(contents):
# for each semester, go through the details and collect unique faculty names
course_id_names = {}
for semester, details in contents.items():
for course_id, course_name in zip(details['Course No'], details['Course Name']):
course_norm_id = text_normalization(course_id)
course_norm_name = text_normalization(course_name)
# if the course is not already seen, add it to the list
if course_norm_id not in course_id_names:
course_id_names[course_norm_id] = {}
course_id_names[course_norm_id]['name'] = []
course_id_names[course_norm_id]['type'] = []
current_course_names = course_id_names[course_norm_id]['name']
current_course_types = course_id_names[course_norm_id]['type']
# some courses would have multiple names per semester, so collect those names as well
if course_norm_name not in current_course_names:
current_course_names.append(course_norm_name)
current_course_types.append(get_type_of_course(course_norm_id, course_norm_name))
return course_id_names
def collect_courses_by_faculty(contents):
courses_by_faculty = {}
# for each semester, collect the courses for each faculty
for semester, details in contents.items():
for course_id, instructor_name, strength in zip(details['Course No'], details['Instructor Name'], details['Strength']):
course_norm_id = text_normalization(course_id)
instructor_norm_name = text_normalization(instructor_name)
# check if the instructor name is in Buffer
if instructor_norm_name not in courses_by_faculty:
courses_by_faculty[instructor_norm_name] = {}
# check if the instructor name's semester is in buffer
if semester not in courses_by_faculty[instructor_norm_name]:
courses_by_faculty[instructor_norm_name][semester] = []
if (course_norm_id, strength) not in courses_by_faculty[instructor_norm_name][semester]:
courses_by_faculty[instructor_norm_name][semester].append((course_norm_id, strength))
return courses_by_faculty
def write_faculty_course_details(wb, course_details_by_faculty, faculties, course_id_names,
start_year, end_year, all_semesters):
ws = wb.add_sheet("Course-Faculty-details")
# write the heading
rowx = 0
# Add headings with styling and frozen first row
ws.set_panes_frozen(True) # frozen headings instead of split panes
ws.set_horz_split_pos(rowx+1) # in general, freeze after last heading row
ws.set_remove_splits(True) # if user does unfreeze, don't leave a split there
heading_xf = xlwt.easyxf('font: bold on; align: wrap on, vert centre, horiz center')
headings = ['Faculty']
semesters = []
for year in range(start_year, end_year+1):
semesters.append('JAN-MAY-' + str(year))
semesters.append('JUL-NOV-' + str(year))
headings += semesters
for colx, value in enumerate(headings):
ws.write(rowx, colx, value, heading_xf)
ws.row(0).height = 1000
ws.col(0).width = 5000
for faculty in faculties:
rowx += 1
ws.row(rowx).height = 2000
colx = 0
ws.write(rowx, colx, faculty, heading_xf)
for semester in semesters:
colx += 1
ws.col(0).width = 4000
course_details = ()
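            # course_details accumulates (text, font) segments in the format
            # expected by xlwt's write_rich_text below.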
if faculty in course_details_by_faculty and semester in course_details_by_faculty[faculty]:
for course_id_strength in course_details_by_faculty[faculty][semester]:
course_id = course_id_strength[0]
course_strength = course_id_strength[1]
course_types = course_id_names[course_id]['type']
font = get_font_from_course_type(course_types)
if course_strength != 0 and 'PROJECT' not in course_types:
course_details += ((course_id + ' (' + str(course_strength) + ')\n', font), )
ws.write_rich_text(rowx, colx, course_details, bg_style)
def get_font_from_course_type(course_types):
font = elective_font
if 'PROJECT' in course_types:
font = project_font
elif 'CORE' in course_types:
font = core_font
return font
def write_course_details(wb, course_id_names):
ws = wb.add_sheet("Course-details")
# write the heading
rowx = 0
# Add headings with styling and frozen first row
ws.set_panes_frozen(True) # frozen headings instead of split panes
ws.set_horz_split_pos(rowx+1) # in general, freeze after last heading row
ws.set_remove_splits(True) # if user does unfreeze, don't leave a split there
heading_xf = xlwt.easyxf('font: bold on; align: wrap on, vert centre, horiz center')
headings = ['Course ID', 'Course name']
for colx, value in enumerate(headings):
ws.write(rowx, colx, value, heading_xf)
ws.row(0).height = 1000
ws.col(0).width = 5000
course_ids = sorted(list(course_id_names.keys()))
for course_id in course_ids:
course_names = course_id_names[course_id]['name']
rowx += 1
font = get_font_from_course_type(course_id_names[course_id]['type'])
ws.write_rich_text(rowx, 0, ((course_id, font),), bg_style)
ws.write_rich_text(rowx, 1, ((', '.join(course_names), font),), bg_style)
def write_into_excel_sheet(course_details_by_faculty, faculties, course_id_names_types,
start_year, end_year, all_semesters):
wb = xlwt.Workbook()
# write the faculty course details
write_faculty_course_details(wb, course_details_by_faculty, faculties, course_id_names_types,
start_year, end_year, all_semesters)
# write the names of courses with ids
write_course_details(wb, course_id_names_types)
wb.save("myworkbook.xls")
if __name__ == '__main__':
# read the csv files containing faculty and course details
contents = read_csv_files()
# collect unique faculty names
faculties = collect_faculty_names(contents)
#print(('faculties', faculties, len(faculties)))
# collect unique course IDs
course_id_names_types = get_course_ids_names(contents)
#print(('course_id_names', course_id_names, len(course_id_names)))
# collect course details for each faculty
course_details_by_faculty = collect_courses_by_faculty(contents)
print(course_details_by_faculty['CHESTER REBEIRO'])
# write the details into excel sheet
write_into_excel_sheet(course_details_by_faculty, faculties,
course_id_names_types, 2015, 2018, list(contents))
| gpl-2.0 |
kagklis/timeseries-summary | Summarize.py | 1 | 6232 | '''
The MIT License (MIT)
Copyright (c) 2016 kagklis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from __future__ import division, print_function
from math import log, sqrt, floor
from pywt import wavedec
import numpy as np
from pandas import Series, DatetimeIndex
def limit_range(values, nm, nM):
M = max(values)
m = min(values)
oldRange = M-m
newRange = nM-nm
for i in range(len(values)):
values[i] = (((values[i] - m)*newRange)/oldRange) + nm
return(values)
##def linreg(X, Y):
## """
## return a,b in solution to y = ax + b such that root mean square distance between trend line and original points is minimized
## """
## N = len(X)
## Sx = Sy = Sxx = Syy = Sxy = 0.0
## for x, y in zip(X, Y):
## Sx = Sx + x
## Sy = Sy + y
## Sxx = Sxx + x*x
## Syy = Syy + y*y
## Sxy = Sxy + x*y
## det = Sxx * N - Sx * Sx
## return (Sxy * N - Sy * Sx)/det, (Sxx * Sy - Sx * Sxy)/det
def mean(values):
return (sum(values)*1.0)/len(values)
def stanDev(values):
m = mean(values)
total_sum = 0
for i in range(len(values)):
total_sum += (values[i]-m)**2
under_root = (total_sum*1.0)/len(values)
return (m,sqrt(under_root))
def convert2timeseries(data):
stocks = {}
for k, v in data.items():
stocks[k] = Series([d["Close"] for d in v], index=DatetimeIndex([d["Date"] for d in v],freq="D"))
return(stocks)
def summarize(data):
############# Summarization #############
    key = list(data.keys())[0]
    values = [x["Close"] for x in list(data.values())[0]]
V = len(values)
# number of levels for DWT
L = int(floor(log(V,2)))
m,s = stanDev(values)
values = [float((v-m)/s) for v in values]
# dictionary with the summarization attributes
stocks = {}
stocks[key] = {}
stocks[key]["extremas"] = []
stocks[key]["extremas-x"] = []
if L <= 20 and L > 1:
w = 'db'+str(L)
lof = 2*L - 1
else:
w = 'db2'
lof = 2*2 - 1
# During the same scan we produce the DWT coefficients
coeffs = wavedec(values, w, level=L)
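    # pywt.wavedec returns [cA_L, cD_L, ..., cD_1]: the level-L approximation
    # first, then detail coefficients from the coarsest level down to the finest.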
A = coeffs[0]
D = coeffs[1:]
# All D_i unfolded
tempD = [i for array in D for i in array]
# For Spectrum Power -> Cycle/Seasonality
max_spec_p = []
max_sum_l = []
spec_ind = []
# For extramas
extr_ind = []
# Scan each D_i
for i in D:
# Turning Points
stocks[key]["extremas"].append(min(i))
extr_ind.append(np.array(i).tolist().index(min(i)))
stocks[key]["extremas"].append(max(i))
extr_ind.append(np.array(i).tolist().index(max(i)))
# Power Spectrum
spec_p = np.array(np.abs(np.fft.fft(i))**2).tolist()
max_sum_l.append(sum(spec_p))
max_val_c = max(spec_p)
max_spec_p.append(max_val_c)
spec_ind.append(spec_p.index(max_val_c))
for p in range(len(extr_ind)):
stocks[key]["extremas-x"].append(tempD.index(D[int(floor(p/2))][extr_ind[p]]))
ps = max(max_spec_p)
#stocks[key]["AL"] = A
# Regression of A_L -> trend
x = np.array(limit_range([i for i in range(len(A))],0,V))
y = np.array(A)
A = np.vstack([x, np.ones(len(x))]).T
m, c = np.linalg.lstsq(A,y)[0]
stocks[key]["trend"] = {"x":range(V), "r":limit_range([m*i + c for i in range(V)],0,1)}
# Turning points
stocks[key]["extremas"] = limit_range(stocks[key]["extremas"],0,1)
stocks[key]["extremas-x"] = limit_range(stocks[key]["extremas-x"],0,V)
# Cycle & Seasonality
stocks[key]["start_cycle"] = tempD.index(D[max_spec_p.index(ps)][spec_ind[max_spec_p.index(ps)]])
stocks[key]["cycle"] = spec_ind[max_spec_p.index(ps)]
mps = max(max_sum_l)
index = max_sum_l.index(mps)
#stocks[key]["Ds"] = limit_range(np.array(D[max_spec_p.index(ps)]).tolist(),0,1)
#stocks[key]["Ds-x"] = limit_range([tempD.index(z) for z in D[max_spec_p.index(ps)]],0,V)
stocks[key]["Ds"] = limit_range(np.array(D[index]).tolist(),0,1)
stocks[key]["Ds-x"] = limit_range([tempD.index(z) for z in D[index]],0,V)
    values = [x["Close"] for x in list(data.values())[0]]
# NCSS
vol = []
for i in range(1,V):
vol.append(100*(log(values[i])-log(values[i-1])))
m = mean(vol)
# Volatility
vol = [((z-m)**2) for z in vol]
stocks[key]["volatility"] = vol
coeffs = wavedec(vol, w, level=L)
A = coeffs[0]
D = coeffs[1:]
# All D_i unfolded
tempD = [i for array in D for i in array]
# NCSS: for change variance
ncss_ind = []
max_ncss = []
# Scan each D_i
for i in D:
pk = []
nn = []
par = sum([tempD[s]**2 for s in range(lof, V-1)])
for j in range(L, V-1):
pp =(sum([tempD[z]**2 for z in range(lof, j)])*1.0)/par
pk.append(max( [ ((j+1)/(V-1))-pp, pp - (j/(V-1)) ]))
nn.append(j)
# NCSS Index Info
max_pk = max(pk)
max_ncss.append(max_pk)
ncss_ind.append(nn[pk.index(max_pk)])
stocks[key]["ncss"] = ncss_ind[max_ncss.index(max(max_ncss))]
timeserie = convert2timeseries(data)
return(timeserie, stocks)
| mit |
kcavagnolo/astroML | book_figures/chapter5/fig_bayes_blocks.py | 3 | 2860 | """
Distribution Representation Comparison
--------------------------------------
Figure 5.21
Comparison of Knuth's histogram and a Bayesian blocks histogram. The adaptive
bin widths of the Bayesian blocks histogram yield a better representation of
the underlying data, especially with fewer points.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats
from astroML.plotting import hist
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Generate our data: a mix of several Cauchy distributions
np.random.seed(0)
N = 10000
mu_gamma_f = [(5, 1.0, 0.1),
(7, 0.5, 0.5),
(9, 0.1, 0.1),
(12, 0.5, 0.2),
(14, 1.0, 0.1)]
true_pdf = lambda x: sum([f * stats.cauchy(mu, gamma).pdf(x)
for (mu, gamma, f) in mu_gamma_f])
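# true_pdf is a mixture of Cauchy components; the weights f sum to one.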
x = np.concatenate([stats.cauchy(mu, gamma).rvs(int(f * N))
for (mu, gamma, f) in mu_gamma_f])
np.random.shuffle(x)
x = x[x > -10]
x = x[x < 30]
#------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(bottom=0.08, top=0.95, right=0.95, hspace=0.1)
N_values = (500, 5000)
subplots = (211, 212)
for N, subplot in zip(N_values, subplots):
ax = fig.add_subplot(subplot)
xN = x[:N]
t = np.linspace(-10, 30, 1000)
# plot the results
ax.plot(xN, -0.005 * np.ones(len(xN)), '|k')
hist(xN, bins='knuth', ax=ax, normed=True,
histtype='stepfilled', alpha=0.3,
label='Knuth Histogram')
hist(xN, bins='blocks', ax=ax, normed=True,
histtype='step', color='k',
label="Bayesian Blocks")
ax.plot(t, true_pdf(t), '-', color='black',
label="Generating Distribution")
# label the plot
ax.text(0.02, 0.95, "%i points" % N, ha='left', va='top',
transform=ax.transAxes)
ax.set_ylabel('$p(x)$')
ax.legend(loc='upper right', prop=dict(size=8))
if subplot == 212:
ax.set_xlabel('$x$')
ax.set_xlim(0, 20)
ax.set_ylim(-0.01, 0.4001)
plt.show()
| bsd-2-clause |
JohanComparat/pySU | galaxy/python/LineFittingLibrary.py | 1 | 49598 | """
.. class:: LineFittingLibrary
.. moduleauthor:: Johan Comparat <johan.comparat__at__gmail.com>
This class contains a variety of function to fit emission or absorption lines in galaxy spectra.
"""
from scipy.optimize import curve_fit
import numpy as n
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
from scipy.interpolate import interp1d
from scipy.integrate import quad
# Location of the emission lines of interest:
import astropy.constants as cc
c=cc.c.value # speed of light
#from lineList import *
class LineFittingLibrary:
"""
Loads the environement proper to fit lines :
* Gaussian line model
* Lorentzian line model
* pseudoVoigt line model
* conversion magnitude AB to flux : flambda to fnu
:param dV: the default value (def: -9999.99)
"""
def __init__(self,dV=-9999.99):
self.dV=dV # default value put in the catalogs
# Line models
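        # In the Gaussian and Lorentzian models below, F0 is the integrated
        # line flux (both profiles are normalised to unit area), a0 the line
        # centre and `continu` a flat continuum level.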
self.gaussianLine=lambda aa,sigma,F0,a0,continu : continu + F0*(n.e**( -(aa-a0)**2. / (2.*sigma**2.)))/ (abs(sigma)*(2.*n.pi)**0.5)
self.gaussianLineNC=lambda aa,sigma,F0,a0 : F0*(n.e**(-(aa-a0)**2./ (2.*sigma**2.) ))/(abs(sigma)*(2.*n.pi)**0.5)
self.lorentzLine=lambda aa,gamma,F0,a0,continu : continu + F0 * abs(gamma) / (n.pi* ((aa-a0)**2 +gamma**2))
self.pseudoVoigtLine=lambda aa,fwhm,F0,a0,continu,sh : continu + F0*abs(sh)/(1+ ((aa-a0) /(fwhm/2.))**2.)+F0*(1-abs(sh))*n.e**( -n.log(2)* ((aa-a0)/(fwhm/2.))**2.)
# conversion magnitude flux
self.fnu = lambda mAB : 10**(-(mAB+48.6)/2.5) # erg/cm2/s/Hz
self.flambda= lambda mAB, ll : 10**10 * c * self.fnu(mAB) / ll**2. # erg/cm2/s/A
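        # flambda converts f_nu (erg/cm2/s/Hz) to f_lambda (erg/cm2/s/A):
        # f_lambda = f_nu * c / lambda^2, with the factor 1e10 turning c from
        # m/s into Angstrom/s.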
def integrateMAG(self,wl,spec1d,err1d,filt,xmin=5000.,xmax=7500.):
"""
Integrates a spectrum over a filter curve.
:param wl: wavelength (array)
:param spec1d: flux, f lambda convention (array)
:param err1d: flux error (array)
:param filt: filter curve (interpolation 1d)
:param xmin: lower integration boundary (Angstrom)
:param xmax: higher integration boundary (Angstrom)
returns :
* integral of filter curve
* integral of spec1d
* integral of spec1d * filter curve
* integral of (spec1d + err1d) * filter curve
* integral of (spec1d - err1d) * filter curve
"""
filtTp=filt(wl)
Lfilt=quad(filt,xmin,xmax,limit=500000)[0]
toInt=interp1d(wl,spec1d)
Lspec=quad(toInt,xmin,xmax,limit=500000)[0]
toInt=interp1d(wl,spec1d*filtTp)
Lg=quad(toInt,xmin,xmax,limit=500000)[0]
toInt=interp1d(wl,(spec1d+err1d)*filtTp)
LgU=quad(toInt,xmin,xmax,limit=500000)[0]
toInt=interp1d(wl,(spec1d-err1d)*filtTp)
LgL=quad(toInt,xmin,xmax,limit=500000)[0]
return Lfilt, Lspec, Lg, LgU, LgL
def getFractionObsMed(self,mag,lambdaMag,fl,flErr):
"""
Computes the fraction of light captured by the spectrograph in a broad band by comparing the median flux in the broad band to the magnitude converted to flux at the mean wavelength of the broad band.
:param mag: magnitude AB (float, mag)
:param lambdaMag: mean wavelength covered by the magnitude AB (float, Angstrom)
:param fl: flux observed in the broad band (array, f lambda)
:param flErr: error on the flux observed in the broad band (array, f lambda)
Returns :
* fraction of light observed
* error on the fraction of light observed
"""
goal=self.flambda(mag,lambdaMag)
fo=goal/n.median(fl)
fo_err=goal/n.median(flErr)
return fo, fo_err
def getFractionObsMag(self,mag,lambdaMag,filter,xmin,xmax,wl,fl,flErr):
"""
Computes the fraction of light captured by the spectrograph in a broad band by comparing the integrated flux in the broad band to the magnitude.
:param mag: magnitude AB (float, mag)
:param lambdaMag: mean wavelength covered by the magnitude AB (float, Angstrom)
:param fl: flux observed in the broad band (array, f lambda)
:param flErr: error on the flux observed in the broad band (array, f lambda)
:param filt: filter curve (interpolation 1d)
:param xmin: lower integration boundary (Angstrom)
:param xmax: higher integration boundary (Angstrom)
Returns :
* fraction of light observed
* error on the fraction of light observed
"""
goal=self.flambda(mag,lambdaMag)
Lfilt, Lspec, Lg, LgU, LgL=self.integrateMAG(wl,fl,flErr,filter,xmin,xmax)
fo=Lg/Lfilt/goal
fo_err=(LgU/Lfilt/goal-LgL/Lfilt/goal)/2
return fo, fo_err
def plotLineFit(self,wl,fl,flErr,lineModel,a0,datI,path_to_fig="plot.pdf", title=" - ", fitWidth = 70., DLC=50, doublet=False):
"""
Plots a spectrum and the emission line model fitted.
:param wl: wavelength (array, Angstrom)
:param fl: flux observed in the broad band (array, f lambda)
:param flErr: error on the flux observed in the broad band (array, f lambda)
:param lineModel: model output by the line fitting functions (array, (2,N) wavelength and flux)
:param a0: position of the peak of the line
:param path_to_fig: where you wish to save the figure
"""
p.figure(0,(8,4))
p.plot(wl,fl,'k')
p.plot(wl,fl+flErr,'g--')
p.plot(wl,fl-flErr,'g--')
p.axvline(a0, c='k')
p.axvline(a0 - fitWidth/2., c='k')
p.axvline(a0 - fitWidth/2. - DLC, c='k')
p.axvline(a0 + fitWidth/2., c='k')
p.axvline(a0 + fitWidth/2. + DLC, c='k')
p.plot(lineModel[0], lineModel[1],'r')
p.xlim((a0 - fitWidth/2. - DLC - 5, a0 + fitWidth/2. + DLC + 5))
p.yscale('log')
p.ylim((n.max([lineModel[1].min() / 5., 1e-18]), lineModel[1].max() * 5.))
x_model = n.arange(a0 - fitWidth/2. - DLC, a0 + fitWidth/2. + DLC, 0.1)
if doublet:
a0_0, a0_1, flux, fluxErr, sigma, sigmaErr, continu, continuErr, EW, share, shareErr, fd_a0_l, fd_a0_r, chi2, ndof= datI
y_model_1 = continu/2. +self.gaussianLineNC( x_model, sigma, share*flux, a0_1)
y_model_2 = continu/2. + self.gaussianLineNC(x_model, sigma, (1-share) * flux, a0_0)
p.title(title+" doublet")
p.plot(x_model, y_model_1, 'c', ls='dashed', lw=2)
p.plot(x_model, y_model_2, 'm', ls='dotted', lw=2)
#p.plot(x_model, y_model_1 + y_model_2, '')
else:
a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof = datI
y_model = self.gaussianLine(x_model, sigma, flux, a0, continu)
p.title(title)
p.plot(x_model, y_model, 'm--')
#p.savefig(path_to_fig)
p.show()
def fit_Line_position_C0noise(self,wl,spec1d,err1d,a0=5007.,lineName="AL",fitWidth=20,DLC=20, p0_sigma=15.,p0_flux=8e-17,p0_share=0.5,continuumSide="left",model="gaussian"):
"""
fits a line profile to a spectrum where the error model is takes the value of the continuum.
:param wl: wavelength (array, Angstrom)
:param spec1d: flux observed in the broad band (array, f lambda)
:param err1d: error on the flux observed in the broad band (array, f lambda)
:param a0: expected position of the peak of the line in the observed frame (redshifted). a0 is fitted.
:param lineName: suffix characterizing the line in the headers of the output
:param fitWidth: width in Angstrom around the line where the fit is performed, default 20 Angstrom
:param continuumSide: "left" = bluewards of the line or "right" = redwards of the line
        :param DLC: wavelength extent to fit the continuum around the line. (def: 20 Angstrom)
:param p0_sigma: prior on the line width in A (def: 15 A)
:param p0_flux: prior on the line flux in erg/cm2/s/A (def: 8e-17)
:param p0_share: prior on the share of Gaussian and Lorentzian model. Only used if the line is fitted with a pseudoVoigt profile width (def: 0.5 no units)
:param model: line model to be fitted : "gaussian", "lorentz" or "pseudoVoigt".
Returns :
* array 1 with the parameters of the model
* array 2 with the model (wavelength, flux model)
* header corresponding to the array 1
"""
header=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
headerPV=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
outPutNF=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
outPutNF_PV=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
modNF=n.array([self.dV,self.dV])
if continuumSide=="left":
domainLine=(wl>a0-fitWidth)&(wl<a0+fitWidth)
domainCont=(wl>a0-DLC-fitWidth)&(wl<a0-fitWidth)
if a0<wl.max()-DLC and a0>wl.min()+fitWidth and a0<wl.max()-fitWidth and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
if model=="gaussian":
flMod=lambda aa,sigma,F0,a0,continu : self.gaussianLine(aa,sigma,F0,a0,continu)
p0=n.array([p0_sigma,p0_flux,a0,continu])
if model=="lorentz":
flMod=lambda aa,sigma,F0,a0,continu : self.lorentzLine(aa,sigma,F0,a0,continu)
p0=n.array([p0_sigma,p0_flux, a0,continu])
if model=="pseudoVoigt":
flMod=lambda aa,sigma,F0,sh,a0,continu : self.pseudoVoigtLine(aa,sigma,F0,a0,continu,sh)
p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu])
interp=interp1d(wl,spec1d)
fd_a0_r=interp(a0+0.2)
fd_a0_l=interp(a0-0.2)
if fd_a0_r>continu and fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=p0,sigma=continu*n.ones_like(err1d[domainLine]),maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray and ( model=="gaussian" or model=="lorentz") :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3])
var=continu*n.ones_like(err1d[domainLine])
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
a0=out[0][2]
a0_err=out[1][2][2]**0.5
continu=out[0][3]
continuErr=out[1][3][3]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof ])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
elif model=="gaussian" or model=="lorentz" :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
elif out[1].__class__==n.ndarray and model=="pseudoVoigt" :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4])
var=continu*n.ones_like(err1d[domainLine])
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
share=out[0][2]
shareErr=out[1][2][2]**0.5
a0=out[0][3]
a0_err=out[1][3][3]**0.5
continu=out[0][4]
continuErr=out[1][4][4]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
mod=n.array([wl[domainLine],model1])
return outPut,mod,headerPV
else :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
if model=="gaussian" or model=="lorentz" :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
if model=="pseudoVoigt" :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
#print "not enough space to fit the line"
if model=="gaussian" or model=="lorentz" :
return outPutNF,modNF,header
if model=="pseudoVoigt" :
return outPutNF_PV,modNF,headerPV
elif continuumSide=="right" :
domainLine=(wl>a0-fitWidth)&(wl<a0+fitWidth)
domainCont=(wl>a0+fitWidth)&(wl<a0+DLC+fitWidth)
if a0<wl.max()-DLC and a0>wl.min()+fitWidth and a0<wl.max()-fitWidth and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
if model=="gaussian":
flMod=lambda aa,sigma,F0,a0,continu : self.gaussianLine(aa,sigma,F0,a0,continu)
p0=n.array([p0_sigma,p0_flux,a0,continu])
if model=="lorentz":
flMod=lambda aa,sigma,F0,a0,continu : self.lorentzLine(aa,sigma,F0,a0,continu)
p0=n.array([p0_sigma,p0_flux, a0,continu])
if model=="pseudoVoigt":
flMod=lambda aa,sigma,F0,sh,a0,continu : self.pseudoVoigtLine(aa,sigma,F0,a0,continu,sh)
p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu])
interp=interp1d(wl,spec1d)
fd_a0_r=interp(a0+0.2)
fd_a0_l=interp(a0-0.2)
if fd_a0_r>continu and fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=p0,sigma=continu*n.ones_like(err1d[domainLine]),maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray and ( model=="gaussian" or model=="lorentz") :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3])
var=continu*n.ones_like(err1d[domainLine])
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
a0=out[0][2]
a0_err=out[1][2][2]**0.5
continu=out[0][3]
continuErr=out[1][3][3]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof ])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
elif model=="gaussian" or model=="lorentz" :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
elif out[1].__class__==n.ndarray and model=="pseudoVoigt" :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4])
var=continu*n.ones_like(err1d[domainLine])
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
share=out[0][2]
shareErr=out[1][2][2]**0.5
a0=out[0][3]
a0_err=out[1][3][3]**0.5
continu=out[0][4]
continuErr=out[1][4][4]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
mod=n.array([wl[domainLine],model1])
return outPut,mod,headerPV
else :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
if model=="gaussian" or model=="lorentz" :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
if model=="pseudoVoigt" :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
#print "not enough space to fit the line"
if model=="gaussian" or model=="lorentz" :
return outPutNF,modNF,header
if model=="pseudoVoigt" :
return outPutNF_PV,modNF,headerPV
def fit_Line_position(self,wl,spec1d,err1d,a0=5007.,lineName="AL",fitWidth=20,DLC=20, p0_sigma=15.,p0_flux=8e-17,p0_share=0.5,continuumSide="left",model="gaussian"):
"""
		fits a line profile to a spectrum around an expected line position; the line centre is left free in the fit
:param wl: wavelength (array, Angstrom)
:param spec1d: flux observed in the broad band (array, f lambda)
:param err1d: error on the flux observed in the broad band (array, f lambda)
		:param a0: initial guess for the position of the peak of the line in the observed frame (redshifted); in this method the line position and the continuum level are free parameters of the fit
		:param lineName: suffix characterizing the line in the headers of the output
		:param DLC: wavelength extent used to estimate the continuum around the line (def: 20 Angstrom)
:param p0_sigma: prior on the line width in A (def: 15 A)
:param p0_flux: prior on the line flux in erg/cm2/s/A (def: 8e-17)
:param p0_share: prior on the share of Gaussian and Lorentzian model. Only used if the line is fitted with a pseudoVoigt profile width (def: 0.5 no units)
:param continuumSide: "left" = bluewards of the line or "right" = redwards of the line
:param model: line model to be fitted : "gaussian", "lorentz" or "pseudoVoigt".
Returns :
* array 1 with the parameters of the model
* array 2 with the model (wavelength, flux model)
* header corresponding to the array 1
"""
header=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
headerPV=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
outPutNF=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
outPutNF_PV=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
modNF=n.array([self.dV,self.dV])
if continuumSide=="left":
domainLine=(wl>a0-fitWidth)&(wl<a0+fitWidth)
domainCont=(wl>a0-DLC-fitWidth)&(wl<a0-fitWidth)
if a0<wl.max()-DLC and a0>wl.min()+fitWidth and a0<wl.max()-fitWidth and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
if model=="gaussian":
flMod=lambda aa,sigma,F0,a0,continu : self.gaussianLine(aa,sigma,F0,a0,continu)
p0=n.array([p0_sigma,p0_flux,a0,continu])
if model=="lorentz":
flMod=lambda aa,sigma,F0,a0,continu : self.lorentzLine(aa,sigma,F0,a0,continu)
p0=n.array([p0_sigma,p0_flux, a0,continu])
if model=="pseudoVoigt":
flMod=lambda aa,sigma,F0,sh,a0,continu : self.pseudoVoigtLine(aa,sigma,F0,a0,continu,sh)
p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu])
interp=interp1d(wl,spec1d)
fd_a0_r=interp(a0+0.2)
fd_a0_l=interp(a0-0.2)
if fd_a0_r>continu and fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=p0,sigma=err1d[domainLine],maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray and ( model=="gaussian" or model=="lorentz") :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
a0=out[0][2]
a0_err=out[1][2][2]**0.5
continu=out[0][3]
continuErr=out[1][3][3]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof ])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
elif model=="gaussian" or model=="lorentz" :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
elif out[1].__class__==n.ndarray and model=="pseudoVoigt" :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
share=out[0][2]
shareErr=out[1][2][2]**0.5
a0=out[0][3]
a0_err=out[1][3][3]**0.5
continu=out[0][4]
continuErr=out[1][4][4]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
mod=n.array([wl[domainLine],model1])
return outPut,mod,headerPV
else :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
if model=="gaussian" or model=="lorentz" :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
if model=="pseudoVoigt" :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
#print "not enough space to fit the line"
if model=="gaussian" or model=="lorentz" :
return outPutNF,modNF,header
if model=="pseudoVoigt" :
return outPutNF_PV,modNF,headerPV
elif continuumSide=="right" :
domainLine=(wl>a0-fitWidth)&(wl<a0+fitWidth)
domainCont=(wl>a0+fitWidth)&(wl<a0+DLC+fitWidth)
if a0<wl.max()-DLC and a0>wl.min()+fitWidth and a0<wl.max()-fitWidth and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
if model=="gaussian":
flMod=lambda aa,sigma,F0,a0,continu : self.gaussianLine(aa,sigma,F0,a0,continu)
p0=n.array([p0_sigma,p0_flux,a0,continu])
if model=="lorentz":
flMod=lambda aa,sigma,F0,a0,continu : self.lorentzLine(aa,sigma,F0,a0,continu)
p0=n.array([p0_sigma,p0_flux, a0,continu])
if model=="pseudoVoigt":
flMod=lambda aa,sigma,F0,sh,a0,continu : self.pseudoVoigtLine(aa,sigma,F0,a0,continu,sh)
p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu])
interp=interp1d(wl,spec1d)
fd_a0_r=interp(a0+0.2)
fd_a0_l=interp(a0-0.2)
if fd_a0_r>continu and fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=p0,sigma=err1d[domainLine],maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray and ( model=="gaussian" or model=="lorentz") :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
a0=out[0][2]
a0_err=out[1][2][2]**0.5
continu=out[0][3]
continuErr=out[1][3][3]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof ])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
elif model=="gaussian" or model=="lorentz" :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
elif out[1].__class__==n.ndarray and model=="pseudoVoigt" :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
share=out[0][2]
shareErr=out[1][2][2]**0.5
a0=out[0][3]
a0_err=out[1][3][3]**0.5
continu=out[0][4]
continuErr=out[1][4][4]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
mod=n.array([wl[domainLine],model1])
return outPut,mod,headerPV
else :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
if model=="gaussian" or model=="lorentz" :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
if model=="pseudoVoigt" :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
#print "not enough space to fit the line"
if model=="gaussian" or model=="lorentz" :
return outPutNF,modNF,header
if model=="pseudoVoigt" :
return outPutNF_PV,modNF,headerPV
def fit_Line(self,wl,spec1d,err1d,a0,lineName="AL",fitWidth=20,DLC=20, p0_sigma=15.,p0_flux=8e-17,p0_share=0.5,continuumSide="left",model="gaussian"):
"""
fits a line profile to a spectrum around a fixed line position
:param wl: wavelength (array, Angstrom)
:param spec1d: flux observed in the broad band (array, f lambda)
:param err1d: error on the flux observed in the broad band (array, f lambda)
:param a0: expected position of the peak of the line in the observed frame (redshifted). a0 is not fitted, it is given.
:param lineName: suffix characterizing the line in the headers of the output
		:param DLC: wavelength extent used to estimate the continuum around the line (def: 20 Angstrom)
:param p0_sigma: prior on the line width in A (def: 15 A)
:param p0_flux: prior on the line flux in erg/cm2/s/A (def: 8e-17)
:param p0_share: prior on the share of Gaussian and Lorentzian model. Only used if the line is fitted with a pseudoVoigt profile width (def: 0.5 no units)
:param continuumSide: "left" = bluewards of the line or "right" = redwards of the line
:param model: line model to be fitted : "gaussian", "lorentz" or "pseudoVoigt".
Returns :
* array 1 with the parameters of the model
* array 2 with the model (wavelength, flux model)
* header corresponding to the array 1
"""
header=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
headerPV=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
outPutNF=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
outPutNF_PV=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
modNF=n.array([self.dV,self.dV])
if continuumSide=="left":
domainLine=(wl>a0-fitWidth)&(wl<a0+fitWidth)
domainCont=(wl>a0-DLC-fitWidth)&(wl<a0-fitWidth)
if a0<wl.max()-DLC and a0>wl.min()+fitWidth and a0<wl.max()-fitWidth and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
if model=="gaussian":
flMod=lambda aa,sigma,F0 : self.gaussianLine(aa,sigma,F0,a0,continu)
p0=n.array([p0_sigma,p0_flux])
if model=="lorentz":
flMod=lambda aa,sigma,F0 : self.lorentzLine(aa,sigma,F0,a0,continu)
p0=n.array([p0_sigma,p0_flux])
if model=="pseudoVoigt":
flMod=lambda aa,sigma,F0,sh : self.pseudoVoigtLine(aa,sigma,F0,a0,continu,sh)
p0=n.array([p0_sigma,p0_flux,p0_share])
interp=interp1d(wl,spec1d)
fd_a0_r=interp(a0+0.2)
fd_a0_l=interp(a0-0.2)
if fd_a0_r>continu and fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=p0,sigma=err1d[domainLine],maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray and ( model=="gaussian" or model=="lorentz") :
model1=flMod(wl[domainLine],out[0][0],out[0][1])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof ])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
elif model=="gaussian" or model=="lorentz" :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
elif out[1].__class__==n.ndarray and model=="pseudoVoigt" :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
share=out[0][2]
shareErr=out[1][2][2]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
mod=n.array([wl[domainLine],model1])
return outPut,mod,headerPV
else :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
if model=="gaussian" or model=="lorentz" :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
if model=="pseudoVoigt" :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
#print "not enough space to fit the line"
if model=="gaussian" or model=="lorentz" :
return outPutNF,modNF,header
if model=="pseudoVoigt" :
return outPutNF_PV,modNF,headerPV
elif continuumSide=="right" :
domainLine=(wl>a0-fitWidth)&(wl<a0+fitWidth)
domainCont=(wl>a0+fitWidth)&(wl<a0+DLC+fitWidth)
if a0<wl.max()-DLC and a0>wl.min()+fitWidth and a0<wl.max()-fitWidth and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
if model=="gaussian":
flMod=lambda aa,sigma,F0 : self.gaussianLine(aa,sigma,F0,a0,continu)
p0=n.array([p0_sigma,p0_flux])
if model=="lorentz":
flMod=lambda aa,sigma,F0 : self.lorentzLine(aa,sigma,F0,a0,continu)
p0=n.array([p0_sigma,p0_flux])
if model=="pseudoVoigt":
flMod=lambda aa,sigma,F0,sh : self.pseudoVoigtLine(aa,sigma,F0,a0,continu,sh)
p0=n.array([p0_sigma,p0_flux,p0_share])
interp=interp1d(wl,spec1d)
fd_a0_r=interp(a0+0.2)
fd_a0_l=interp(a0-0.2)
if fd_a0_r>continu and fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=p0,sigma=err1d[domainLine],maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray and ( model=="gaussian" or model=="lorentz") :
model1=flMod(wl[domainLine],out[0][0],out[0][1])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof ])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
elif model=="gaussian" or model=="lorentz" :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
elif out[1].__class__==n.ndarray and model=="pseudoVoigt" :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
share=out[0][2]
shareErr=out[1][2][2]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
mod=n.array([wl[domainLine],model1])
return outPut,mod,headerPV
else :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
if model=="gaussian" or model=="lorentz" :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
if model=="pseudoVoigt" :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
#print "not enough space to fit the line"
if model=="gaussian" or model=="lorentz" :
return outPutNF,modNF,header
if model=="pseudoVoigt" :
return outPutNF_PV,modNF,headerPV
def fit_Line_OIIdoublet(self,wl,spec1d,err1d,a0=3726.0321735398957,lineName="OII",fitWidth=20,DLC=20,p0_sigma=4.,p0_flux=1e-16,p0_share=0.58,model="gaussian"):
"""
fits the [OII] doublet line profile
:param wl: wavelength (array, Angstrom)
:param spec1d: flux observed in the broad band (array, f lambda)
:param err1d: error on the flux observed in the broad band (array, f lambda)
		:param a0: expected observed-frame (redshifted) positions of the two peaks of the doublet, given as a length-2 sequence [a0a, a0b] (the scalar default in the signature is only a placeholder and must be overridden, since the code indexes a0[0] and a0[1])
		:param lineName: suffix characterizing the line in the headers of the output
		:param DLC: wavelength extent used to estimate the continuum bluewards of the doublet (def: 20 Angstrom)
		:param p0_sigma: prior on the line width in A (def: 4 A)
		:param p0_flux: prior on the line flux in erg/cm2/s/A (def: 1e-16)
		:param p0_share: prior on the share between the two [OII] lines. (def: 0.58)
		:param model: line model to be fitted: "gaussian" or "lorentz"; the continuum is always estimated on the blue side of the doublet
Returns :
* array 1 with the parameters of the model
* array 2 with the model (wavelength, flux model)
* header corresponding to the array 1
"""
header=" "+lineName+"_a0a "+lineName+"_a0b "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
outPutNF=n.array([a0[0], a0[1], self.dV,self.dV, self.dV,self.dV, self.dV, self.dV,self.dV, self.dV,self.dV, self.dV,self.dV,self.dV,self.dV])
modNF=n.array([self.dV,self.dV])
domainLine=(wl>a0[0]-fitWidth)&(wl<a0[1]+fitWidth)
domainCont=(wl>a0[0]-DLC-fitWidth)&(wl<a0[0]-fitWidth)
if a0[0]<wl.max()-DLC and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
if model=="gaussian":
flMod=lambda aa,sigma,F0,sh :continu+ self.gaussianLineNC(aa,sigma,(1-sh)*F0,a0[0])+self.gaussianLineNC(aa,sigma,sh*F0,a0[1])
if model=="lorentz":
flMod=lambda aa,sigma,F0,sh : self.lorentzLine(aa,sigma,(1-sh)*F0,a0[0],continu/2.)+self.lorentzLine(aa,sigma,sh*F0,a0[1],continu/2.)
index=n.searchsorted(wl,a0[1])
fd_a0_r=spec1d[index]
fd_a0_l=spec1d[index]
index=n.searchsorted(wl,a0[0])
if fd_a0_r>continu or fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=n.array([p0_sigma,p0_flux,p0_share]),sigma=err1d[domainLine],maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
share=out[0][2]
shareErr=out[1][2][2]**0.5
EW=flux/continu
outPut=n.array([a0[0],a0[1],flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
else :
return n.array([a0[0],a0[1],self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,header
else :
return n.array([a0[0],a0[1],self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,header
else :
#print "not enough space to fit the line"
return outPutNF,modNF,header
def fit_Line_OIIdoublet_position(self,wl,spec1d,err1d,a0=3726.0321,lineName="O2_3728",fitWidth=20,DLC=20,p0_sigma=4.,p0_flux=1e-16,p0_share=0.58,model="gaussian"):
"""
fits the [OII] doublet line profile
:param wl: wavelength (array, Angstrom)
:param spec1d: flux observed in the broad band (array, f lambda)
:param err1d: error on the flux observed in the broad band (array, f lambda)
		:param a0: expected observed-frame (redshifted) position of the bluer component of the doublet (scalar); the redder component is tied to it at a0 + 2.782374 Angstrom and the doublet position is left free in the fit
		:param lineName: suffix characterizing the line in the headers of the output
		:param DLC: wavelength extent used to estimate the continuum bluewards of the doublet (def: 20 Angstrom)
		:param p0_sigma: prior on the line width in A (def: 4 A)
		:param p0_flux: prior on the line flux in erg/cm2/s/A (def: 1e-16)
		:param p0_share: prior on the share between the two [OII] lines. (def: 0.58)
		:param model: line model to be fitted; only "gaussian" is implemented in this method
Returns :
* array 1 with the parameters of the model
* array 2 with the model (wavelength, flux model)
* header corresponding to the array 1
"""
header=" "+lineName+"_a0a "+lineName+"_a0b "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
outPutNF=n.array([a0, a0+2.782374, self.dV,self.dV, self.dV,self.dV, self.dV, self.dV,self.dV, self.dV,self.dV, self.dV,self.dV,self.dV,self.dV])
modNF=n.array([self.dV,self.dV])
domainLine=(wl>a0-fitWidth)&(wl<a0+2.782374+fitWidth/2.)
domainCont=(wl>a0-fitWidth-DLC)&(wl<a0-fitWidth)
if a0<wl.max()-DLC and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
if model=="gaussian":
flMod=lambda aa,sigma,F0,sh,a0,continu :continu+ self.gaussianLineNC(aa,sigma,(1-sh)*F0,a0)+self.gaussianLineNC(aa,sigma,sh*F0,a0+2.782374)
p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu])
index=n.searchsorted(wl,a0+2.782374)
fd_a0_r=spec1d[index]
index=n.searchsorted(wl,a0)
fd_a0_l=spec1d[index]
if fd_a0_r>continu or fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu]),sigma=err1d[domainLine],maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
share=out[0][2]
shareErr=out[1][2][2]**0.5
a0=out[0][3]
a0_err=out[1][3][3]**0.5
continu=out[0][4]
continuErr=out[1][4][4]**0.5
EW=flux/continu
outPut=n.array([a0,a0+2.782374,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
else :
return n.array([a0,a0+2.782374,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,header
else :
return n.array([a0,a0+2.782374,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,header
else :
#print "not enough space to fit the line"
return outPutNF,modNF,header
def fit_Line_OIIdoublet_position_C0noise(self,wl,spec1d,err1d,a0=3726.0321,lineName="O2_3728",fitWidth=20,DLC=20,p0_sigma=4.,p0_flux=1e-16,p0_share=0.58,model="gaussian"):
"""
		fits the [OII] doublet line profile, weighting the fit with a constant noise level equal to the continuum estimate instead of err1d
:param wl: wavelength (array, Angstrom)
:param spec1d: flux observed in the broad band (array, f lambda)
:param err1d: error on the flux observed in the broad band (array, f lambda)
		:param a0: expected observed-frame (redshifted) position of the bluer component of the doublet (scalar); the redder component is tied to it at a0 + 2.782374 Angstrom and the doublet position is left free in the fit
		:param lineName: suffix characterizing the line in the headers of the output
		:param DLC: wavelength extent used to estimate the continuum bluewards of the doublet (def: 20 Angstrom)
		:param p0_sigma: prior on the line width in A (def: 4 A)
		:param p0_flux: prior on the line flux in erg/cm2/s/A (def: 1e-16)
		:param p0_share: prior on the share between the two [OII] lines. (def: 0.58)
		:param model: line model to be fitted; only "gaussian" is implemented in this method
Returns :
* array 1 with the parameters of the model
* array 2 with the model (wavelength, flux model)
* header corresponding to the array 1
"""
header=" "+lineName+"_a0a "+lineName+"_a0b "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
outPutNF=n.array([a0, a0+2.782374, self.dV,self.dV, self.dV,self.dV, self.dV, self.dV,self.dV, self.dV,self.dV, self.dV,self.dV,self.dV,self.dV])
modNF=n.array([self.dV,self.dV])
domainLine=(wl>a0-fitWidth)&(wl<a0+2.782374+fitWidth/2.)
domainCont=(wl>a0-fitWidth-DLC)&(wl<a0-fitWidth)
if a0<wl.max()-DLC and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
if model=="gaussian":
flMod=lambda aa,sigma,F0,sh,a0,continu :continu+ self.gaussianLineNC(aa,sigma,(1-sh)*F0,a0)+self.gaussianLineNC(aa,sigma,sh*F0,a0+2.782374)
p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu])
index=n.searchsorted(wl,a0+2.782374)
fd_a0_r=spec1d[index]
index=n.searchsorted(wl,a0)
fd_a0_l=spec1d[index]
if fd_a0_r>continu or fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=n.array([p0_sigma,p0_flux,p0_share,a0,continu]),sigma=continu*n.ones_like(err1d[domainLine]),maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4])
var=continu*n.ones_like(err1d[domainLine])
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
share=out[0][2]
shareErr=out[1][2][2]**0.5
a0=out[0][3]
a0_err=out[1][3][3]**0.5
continu=out[0][4]
continuErr=out[1][4][4]**0.5
EW=flux/continu
outPut=n.array([a0,a0+2.782374,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,share,shareErr,fd_a0_l,fd_a0_r,chi2,ndof])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
else :
return n.array([a0,a0+2.782374,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,header
else :
return n.array([a0,a0+2.782374,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,self.dV,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,header
else :
#print "not enough space to fit the line"
return outPutNF,modNF,header
def fit_recLine(self,wl,spec1d,err1d,a0,lineName="AL",fitWidth=20,DLC=20,p0_sigma=5.,p0_flux=5e-17,continuumSide="left"):
"""
fits a recombination line profile : emission and absorption modeled by Gaussians. Only for high SNR spectra.
:param wl: wavelength (array, Angstrom)
:param spec1d: flux observed in the broad band (array, f lambda)
:param err1d: error on the flux observed in the broad band (array, f lambda)
:param a0: expected position of the peak of the line in the observed frame (redshifted)
:param lineName: suffix characterizing the line in the headers of the output
		:param DLC: wavelength extent used to estimate the continuum around the line (def: 20 Angstrom)
		:param p0_sigma: prior on the line width in A (def: 5 A)
		:param p0_flux: prior on the line flux in erg/cm2/s/A (def: 5e-17)
:param continuumSide: "left" = bluewards of the line or "right" = redwards of the line
Returns :
* array 1 with the parameters of the model
* array 2 with the model (wavelength, flux model)
* header corresponding to the array 1
"""
header=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
headerPV=" "+lineName+"_a0 "+lineName+"_flux "+lineName+"_fluxErr "+lineName+"_sigma "+lineName+"_sigmaErr "+lineName+"_continu "+lineName+"_continuErr "+lineName+"_EW "+lineName+"_share "+lineName+"_shareErr_"+" fd_a0_l "+lineName+"_fd_a0_r "+lineName+"_chi2 "+lineName+"_ndof"
outPutNF=n.array([a0, self.dV,self.dV,self.dV, self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV,self.dV])
modNF=n.array([self.dV,self.dV])
if continuumSide=="left":
domainLine=(wl>a0-fitWidth)&(wl<a0+fitWidth)
domainCont=(wl>a0-DLC)&(wl<a0-fitWidth)
if a0<wl.max()-DLC and a0>wl.min()+fitWidth and a0<wl.max()-fitWidth and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
# model with absorption
flMod=lambda aa,sigma,F0,sigmaL,F0L,a0L,sigmaR,F0R,a0R : continu + self.gaussianLineNC(aa,sigma,F0,a0) - self.gaussianLineNC(aa,sigmaL,F0L,a0L) - self.gaussianLineNC(aa,sigmaR,F0R,a0R)
p0=n.array([p0_sigma,p0_flux,p0_sigma/2.,p0_flux/5.,a0-5, p0_sigma/2.,p0_flux/5.,a0-5])
interp=interp1d(wl,spec1d)
fd_a0_r=interp(a0+0.2)
fd_a0_l=interp(a0-0.2)
if fd_a0_r>continu and fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=p0,sigma=err1d[domainLine],maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4],out[0][5],out[0][6],out[0][7])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)-8
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof ])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
else :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
else :
#print "not enough space to fit the line"
return outPutNF,modNF,header
elif continuumSide=="right" :
domainLine=(wl>a0-fitWidth)&(wl<a0+fitWidth)
domainCont=(wl>a0+fitWidth)&(wl<a0+DLC)
if a0<wl.max()-DLC and a0>wl.min()+fitWidth and a0<wl.max()-fitWidth and len(domainLine.nonzero()[0])>2 and len(domainCont.nonzero()[0])>2 :
continu=n.median(spec1d[domainCont])
continuErr=n.median(err1d[domainCont])
# model with absorption
flMod=lambda aa,sigma,F0,sigmaL,F0L,a0L,sigmaR,F0R,a0R : continu + self.gaussianLineNC(aa,sigma,F0,a0) - self.gaussianLineNC(aa,sigmaL,F0L,a0L) - self.gaussianLineNC(aa,sigmaR,F0R,a0R)
p0=n.array([p0_sigma,p0_flux,p0_sigma/2.,p0_flux/5.,a0-5, p0_sigma/2.,p0_flux/5.,a0-5])
interp=interp1d(wl,spec1d)
fd_a0_r=interp(a0+0.2)
fd_a0_l=interp(a0-0.2)
if fd_a0_r>continu and fd_a0_l>continu :
out = curve_fit(flMod, wl[domainLine], spec1d[domainLine], p0=p0,sigma=err1d[domainLine],maxfev=1000000000, gtol=1.49012e-8)
if out[1].__class__==n.ndarray :
model1=flMod(wl[domainLine],out[0][0],out[0][1],out[0][2],out[0][3],out[0][4],out[0][5],out[0][6],out[0][7])
var=err1d[domainLine]
chi2=n.sum(abs(model1-spec1d[domainLine])**2./var**2.)
ndof=len(var)-8
sigma=out[0][0]
sigmaErr=out[1][0][0]**0.5
flux=out[0][1]
fluxErr=out[1][1][1]**0.5
EW=flux/continu
outPut=n.array([a0,flux,fluxErr,sigma,sigmaErr,continu,continuErr,EW,fd_a0_l,fd_a0_r,chi2,ndof ])
mod=n.array([wl[domainLine],model1])
return outPut,mod,header
else :
return n.array([a0,self.dV,self.dV,self.dV,self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV]),modNF,headerPV
else :
return n.array([a0,self.dV,self.dV,self.dV, self.dV,continu,continuErr,self.dV,fd_a0_l,fd_a0_r,self.dV,self.dV ]),modNF,header
else :
#print "not enough space to fit the line"
return outPutNF,modNF,header
| cc0-1.0 |
Midnighter/pyorganism | scripts/trn_randomization_analysis.py | 1 | 14324 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, unicode_literals)
import os
import sys
import logging
import argparse
from logging.config import dictConfig
from glob import glob
from random import choice
import numpy as np
import networkx as nx
import pandas as pd
from IPython.parallel import (interactive, Client)
from progressbar import (ProgressBar, Timer, SimpleProgress, Bar, Percentage, ETA)
import pyorganism as pyorg
from pyorganism.regulation import trn2grn
from meb.utils.network.randomisation import NetworkRewiring
from meb.utils.network.subgraphs import triadic_census
LOGGER = logging.getLogger()
LOGGER.addHandler(logging.StreamHandler())
###############################################################################
# Randomisation
###############################################################################
def load_data(locations):
tr_nets = dict()
versions = list()
drop = list()
for path in locations:
ver = os.path.basename(path)
if not ver:
ver = os.path.basename(os.path.dirname(path))
try:
pyorg.read_pickle(os.path.join(path, "genes.pkl"))
pyorg.read_pickle(os.path.join(path, "transcription_factors.pkl"))
trn = pyorg.read_pickle(os.path.join(path, "trn.pkl"))
tr_nets[ver] = trn
versions.append(ver)
except IOError:
drop.append(path)
continue
for path in drop:
locations.remove(path)
return (tr_nets, versions)
@interactive
def rewire(version):
net = globals()["TRN"][version].copy()
prob = globals()["prob"]
rnd = np.random.sample
nodes = sorted(net.nodes_iter())
regulating = {node for (node, deg) in net.out_degree_iter() if deg > 0}
regulated = set(nodes) - regulating
edges = net.edges(data=True, keys=True)
for (u, v, key, data) in edges:
if rnd() < prob:
targets = list(regulated - set(net.successors(u)))
new = choice(targets)
while net.has_edge(u, new, key):
new = choice(targets)
net.remove_edge(u, v, key)
net.add_edge(u, new, key=key, **data)
return net
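# Note on rewire() above (illustrative numbers, not taken from the data): with
# prob=0.1 each regulatory link (u, v) is redirected, with probability 0.1, to a
# randomly chosen node that is currently only regulated and not already a target
# of u, so a TRN with roughly 4000 interactions is expected to have about 400
# links moved per randomised copy while the regulators' out-degrees are preserved.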
def rewiring(lb_view, versions, args):
bar = ProgressBar(maxval=args.rnd_num, widgets=[Timer(), " ",
SimpleProgress(), " ", Percentage(), " ", Bar(), " ", ETA()])
rands = list()
for ver in versions:
LOGGER.info(ver)
res_it = lb_view.map(rewire, [ver] * args.rnd_num, block=False, ordered=False)
bar.start()
for rng in res_it:
rands.append(rng)
bar += 1
bar.finish()
pyorg.write_pickle(rands, os.path.join(args.out_path, ver,
"trn_rewired_{0:.1f}.pkl".format(args.prob)))
lb_view.purge_results("all")
del rands[:] # activate garbage collection in loop
@interactive
def null_model(version):
net = nx.DiGraph(globals()["TRN"][version])
flips = globals()["flip_num"]
    # keep the relabelled copy; convert_node_labels_to_integers returns a new graph
    net = nx.convert_node_labels_to_integers(net, ordering="sorted",
            label_attribute="element")
rewirer = NetworkRewiring()
(rnd_net, flip_rate) = rewirer.randomise(net, flip=flips, copy=False)
return (rnd_net, flip_rate)
def randomisation(lb_view, versions, args):
bar = ProgressBar(maxval=args.rnd_num, widgets=[Timer(), " ",
SimpleProgress(), " ", Percentage(), " ", Bar(), " ", ETA()])
for ver in versions:
LOGGER.info(ver)
res_it = lb_view.map(null_model, [ver] * args.rnd_num, block=False, ordered=False)
bar.start()
rands = list()
success = list()
for (rnd_net, flip_rate) in res_it:
rands.append(rnd_net)
success.append(flip_rate)
bar +=1
bar.finish()
lb_view.purge_results("all")
LOGGER.info("mean flip success rate: %.3G +/- %.3G", np.mean(success),
np.std(success))
pyorg.write_pickle(rands, os.path.join(args.out_path, ver,
"trn_random.pkl"))
del rands[:] # activate garbage collection in loop
def main_random(rc, args):
locations = sorted(glob(os.path.join(args.in_path, args.glob)))
locations = [os.path.abspath(loc) for loc in locations]
LOGGER.info("loading data")
(tr_nets, versions) = load_data(locations)
LOGGER.info("remote preparation")
dv = rc.direct_view()
dv.execute("import os;"\
"from random import choice;"\
"import numpy as np;"\
"import networkx as nx;"\
"import pyorganism as pyorg;"\
"from meb.utils.network.randomisation import NetworkRewiring"\
"import logging; from IPython.config import Application;"\
"LOGGER = Application.instance().log;"\
"LOGGER.setLevel(logging.{level});".format(level=args.log_level),
block=True)
dv.push({"load_data": load_data, "locations": locations}, block=True)
dv.execute("(TRN, versions) = load_data(locations);", block=True)
lv = rc.load_balanced_view()
if args.run_rewire:
LOGGER.info("rewiring")
dv.push({"rewire": rewire, "prob": args.prob}, block=True)
rewiring(lv, versions, args)
if args.run_rnd:
LOGGER.info("randomisation")
dv.push({"null_model": null_model, "flip_num": args.flip_num}, block=True)
randomisation(lv, versions, args)
###############################################################################
# Analysis
###############################################################################
@interactive
def stats(grn, version, description):
nodes = sorted(grn.nodes_iter())
regulating = {node for (node, deg) in grn.out_degree_iter() if deg > 0}
regulated = set(nodes) - regulating
components = sorted(nx.weakly_connected_components(grn), key=len,
reverse=True)
data = dict()
census = triadic_census(grn)
forward = census["030T"]
feedback = census["030C"]
cycles = list(nx.simple_cycles(grn))
in_deg = [grn.in_degree(node) for node in regulated]
out_deg = [grn.out_degree(node) for node in regulating]
data["version"] = version
data["num_components"] = len(components)
data["largest_component"] = len(components[0])
data["feed_forward"] = forward
data["feedback"] = feedback
data["cycles"] = len(cycles)
data["regulated_in_deg"] = np.mean(in_deg)
data["regulating_out_deg"] = np.mean(out_deg)
data["null_model"] = description
stats = pd.DataFrame(data, index=[1])
return stats
@interactive
def err_stats(version, description):
data = dict()
data["version"] = version
data["num_components"] = None
data["largest_component"] = None
data["feed_forward"] = None
data["feedback"] = None
data["cycles"] = None
data["regulated_in_deg"] = None
data["regulating_out_deg"] = None
data["null_model"] = description
return pd.DataFrame(data, index=[1])
@interactive
def null_stats(base_dir, task):
glbls = globals()
prob = glbls["prob"]
choose = glbls["choose"]
logger = glbls["LOGGER"]
ver = os.path.basename(base_dir)
if not ver:
ver = os.path.basename(os.path.dirname(base_dir))
if task == "rewired":
desc = "rewired {0:.1f}".format(prob)
filename = "trn_rewired_{0:.1f}.pkl".format(prob)
elif task == "switch":
desc = "switch"
filename = "trn_random.pkl"
try:
nets = pyorg.read_pickle(os.path.join(base_dir, filename))
except (OSError, IOError, EOFError):
(err, msg, trace) = sys.exc_info()
logger.error("Version: '%s' Task: '%s'", ver, task)
logger.error(str(msg))
return err_stats(ver, desc)
chosen = random.sample(nets, choose)
logger.info("%d/%d random networks", len(chosen), len(nets))
nets = [trn2grn(net) for net in chosen]
return pd.concat([stats(net, ver, desc) for net in nets], ignore_index=True)
def filter_tasks(locations, tasks, results, num_expect=1000):
if results.empty:
return ([loc for loc in locations for method in tasks],
[method for loc in locations for method in tasks])
paths = list()
methods = list()
for loc in locations:
ver = os.path.basename(loc)
if not ver:
ver = os.path.basename(os.path.dirname(loc))
mask = (results["version"] == ver)
for t in tasks:
if sum(t in dscr for dscr in results.loc[mask,
"null_model"]) != num_expect:
paths.append(loc)
methods.append(t)
return (paths, methods)
def main_analysis(rc, args):
locations = sorted(glob(os.path.join(args.in_path, args.glob)))
locations = [os.path.abspath(loc) for loc in locations]
LOGGER.info("remote preparation")
dv = rc.direct_view()
dv.execute("import os;"\
"import sys;"\
"import random;"\
"import numpy as np;"\
"import networkx as nx;"\
"import pandas as pd;"\
"from pyorganism.regulation import trn2grn;"\
"from meb.utils.network.subgraphs import triadic_census;"\
"import pyorganism as pyorg;"\
"import logging; from IPython.config import Application;"\
"LOGGER = Application.instance().log;"\
"LOGGER.setLevel(logging.{level});".format(level=args.log_level),
block=True)
dv.push({"stats": stats, "err_stats": err_stats, "choose": args.choose}, block=True)
tasks = list()
if args.run_rewire:
dv.push({"prob": args.prob}, block=True)
tasks.append("rewired")
if args.run_rnd:
tasks.append("switch")
filename = os.path.join(args.out_path, args.file)
if os.path.exists(filename):
result = pd.read_csv(filename, sep=str(";"), dtype={"version": str},
encoding=args.encoding)
else:
result = pd.DataFrame(columns=["version", "num_components",
"largest_component", "feed_forward", "feedback", "cycles",
"regulated_in_deg", "regulating_out_deg", "null_model"])
(locations, methods) = filter_tasks(locations, tasks, result, args.choose)
lv = rc.load_balanced_view()
res_it = lv.map(null_stats, locations, methods, block=False, ordered=False)
bar = ProgressBar(maxval=len(locations) * 2, widgets=[Timer(), " ",
SimpleProgress(), " ", Percentage(), " ", Bar(), " ", ETA()]).start()
for df in res_it:
result = result.append(df, ignore_index=True)
result.to_csv(filename, header=True, index=False,
sep=str(";"), encoding=args.encoding)
bar += 1
bar.finish()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=None)
parser.add_argument("-v", "--version", action="version", version="0.1")
parser.add_argument("--profile", dest="profile", default="default",
help="IPython profile to connect to cluster (default: %(default)s)")
parser.add_argument("--cluster-id", dest="cluster_id", default=None,
help="IPython cluster-id to connect to (default: %(default)s)")
parser.add_argument("--log-level", dest="log_level", default="INFO",
help="Log level, i.e., DEBUG, INFO, WARN, ERROR, CRITICAL (default: %(default)s)")
parser.add_argument("--encoding", dest="encoding", default="utf-8",
help="File encoding to assume (default: %(default)s)")
parser.add_argument("--no-rewire", dest="run_rewire", action="store_false",
default=True, help="Avoid creation or analysis of rewired TRNs")
parser.add_argument("--no-randomize", dest="run_rnd", action="store_false",
default=True, help="Avoid creation or analysis of randomized TRNs")
parser.add_argument("-g", "--glob", dest="glob", default="[0-9].[0-9]",
help="Glob pattern for RegulonDB version directories (default: %(default)s)")
parser.add_argument("-i", "--input", dest="in_path", default="RegulonDBObjects",
help="Base directory for data input (default: %(default)s)")
parser.add_argument("-o", "--output", dest="out_path", default="RegulonDBObjects",
help="Base directory for data output (default: %(default)s)")
subparsers = parser.add_subparsers(help="sub-command help")
# randomization
parser_rnd = subparsers.add_parser("randomization",
help="Rewire or randomize the TRN as a statistical null model")
parser_rnd.add_argument("-r", "--rnd-num", dest="rnd_num",
default=int(1E03), type=int,
help="Number of rewired or randomized TRNs to generate (default: %(default)s)")
parser_rnd.add_argument("-p", "--probability", dest="prob",
default=0.1, type=float,
help="Probability for rewiring a link (default: %(default)s)")
parser_rnd.add_argument("-f", "--flip-num", dest="flip_num",
default=int(1E02), type=int,
help="Number of attempts to switch each link (default: %(default)s)")
parser_rnd.set_defaults(func=main_random)
# analysis
parser_anal = subparsers.add_parser("analysis",
help="Analyze the rewired or randomized TRNs")
parser_anal.add_argument("-p", "--probability", dest="prob",
default=0.1, type=float,
help="Probability for rewiring a link to analyze (default: %(default)s)")
parser_anal.add_argument("-f", "--filename", dest="file",
default="trn_random_stats.csv",
help="Name of the file that statistics are written to (default: %(default)s)")
parser_anal.add_argument("-c", "--choose", dest="choose",
default=int(1E02), type=int,
help="Size of the subset of random networks to evaluate (default: %(default)s)")
parser_anal.set_defaults(func=main_analysis)
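    # Illustrative invocations (profile name and paths are hypothetical):
    #   python trn_randomization_analysis.py --profile trn -i RegulonDBObjects \
    #       randomization -r 1000 -p 0.1
    #   python trn_randomization_analysis.py --profile trn analysis -p 0.1 -c 100 \
    #       -f trn_random_stats.csv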
args = parser.parse_args()
dictConfig({"version": 1, "incremental": True, "root": {"level": args.log_level}})
remote_client = Client(profile=args.profile, cluster_id=args.cluster_id)
try:
sys.exit(args.func(remote_client, args))
except: # we want to catch everything
(err, msg, trace) = sys.exc_info()
# interrupt remote kernels and clear job queue
raise err, msg, trace
finally:
logging.shutdown()
| bsd-3-clause |
KasperPRasmussen/bokeh | bokeh/charts/builder.py | 3 | 25115 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Builder class, a minimal prototype class to build more chart
types on top of it.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from .attributes import AttrSpec, ColorAttr, CatAttr
from .chart import Chart
from .data_source import ChartDataSource
from .models import CompositeGlyph
from .properties import Dimension, ColumnLabel
from .utils import collect_attribute_columns, label_from_index_dict, build_hover_tooltips
from .data_source import OrderedAssigner
from ..models.ranges import Range, Range1d, FactorRange
from ..models.sources import ColumnDataSource
from ..core.properties import (HasProps, Instance, List, String, Dict,
Color, Bool, Tuple, Either)
from ..io import curdoc, curstate
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def create_and_build(builder_class, *data, **kws):
"""A factory function for handling Chart and Builder generation.
Returns:
:class:`Chart`
"""
if getattr(builder_class, 'dimensions') is None:
raise NotImplementedError('Each builder must specify its dimensions, %s does not.' % builder_class.__name__)
if getattr(builder_class, 'default_attributes') is None:
raise NotImplementedError('Each builder must specify its default_attributes, %s does not.' % builder_class.__name__)
builder_props = set(builder_class.properties())
# append dimensions to the builder props
for dim in builder_class.dimensions:
builder_props.add(dim)
# append attributes to the builder props
for attr_name in builder_class.default_attributes.keys():
builder_props.add(attr_name)
# create the new builder
builder_kws = {k: v for k, v in kws.items() if k in builder_props}
builder = builder_class(*data, **builder_kws)
# create a chart to return, since there isn't one already
chart_kws = { k:v for k,v in kws.items() if k not in builder_props}
chart = Chart(**chart_kws)
chart.add_builder(builder)
chart.start_plot()
curdoc()._current_plot = chart # TODO (havocp) store this on state, not doc?
if curstate().autoadd:
curdoc().add_root(chart)
return chart
class Builder(HasProps):
""" A prototype class to inherit each new chart Builder type.
It provides useful methods to be used by the inherited builder classes,
in order to automate most of the charts creation tasks and leave the
core customization to specialized builder classes. In that pattern
inherited builders just need to provide the following methods:
Required:
* :meth:`~bokeh.charts.builder.Builder.yield_renderers`: yields the glyphs to be
rendered into the plot. Here you should call the
:meth:`~bokeh.charts.builder.Builder.add_glyph` method so that the builder can
setup the legend for you.
* :meth:`~bokeh.charts.builder.Builder.set_ranges`: setup the ranges for the
glyphs. This is called after glyph creation, so you are able to inspect the
comp_glyphs for their minimum and maximum values. See the
:meth:`~bokeh.charts.builder.Builder.create` method for more information on
when this is called and how the builder provides the ranges to the containing
:class:`Chart` using the :meth:`Chart.add_ranges` method.
Optional:
* :meth:`~bokeh.charts.builder.Builder.setup`: provides an area
where subclasses of builder can introspect properties, setup attributes, or change
property values. This is called before
:meth:`~bokeh.charts.builder.Builder.process_data`.
* :meth:`~bokeh.charts.builder.Builder.process_data`: provides an area
where subclasses of builder can manipulate the source data before renderers are
created.
"""
# Optional Inputs
x_range = Instance(Range)
y_range = Instance(Range)
xlabel = String()
ylabel = String()
xscale = String()
yscale = String()
palette = List(Color, help="""Optional input to override the default palette used
by any color attribute.
""")
# Dimension Configuration
"""
The dimension labels that drive the position of the
glyphs. Subclasses should implement this so that the Builder
base class knows which dimensions it needs to operate on.
An example for a builder working with cartesian x and y
coordinates would be dimensions = ['x', 'y']. You should
then instantiate the x and y dimensions as attributes of the
subclass of builder using the :class:`Dimension
<bokeh.charts.properties.Dimension>` class. One for x, as x
= Dimension(...), and one as y = Dimension(...).
"""
dimensions = None # None because it MUST be overridden
"""
The dimension labels that must exist to produce the
glyphs. This specifies what are the valid configurations for
the chart, with the option of specifying the type of the
columns. The
:class:`~bokeh.charts.data_source.ChartDataSource` will
inspect this property of your subclass of Builder and use
this to fill in any required dimensions if no keyword
arguments are used.
"""
req_dimensions = []
# Attribute Configuration
attributes = Dict(String, Instance(AttrSpec), help="""
The attribute specs used to group data. This is a mapping between the role of
the attribute spec (e.g. 'color') and the
:class:`~bokeh.charts.attributes.AttrSpec` class (e.g.,
:class:`~bokeh.charts.attributes.ColorAttr`). The Builder will use this
attributes property during runtime, which will consist of any attribute specs
that are passed into the chart creation function (e.g.,
:class:`~bokeh.charts.Bar`), ones that are created for the user from simple
input types (e.g. `Bar(..., color='red')` or `Bar(..., color=<column_name>)`),
or lastly, the attribute spec found in the default_attributes configured for
the subclass of :class:`~bokeh.charts.builder.Builder`.
""")
"""
The default attribute specs used to group data. This is
where the subclass of Builder should specify what the
default attributes are that will yield attribute values to
each group of data, and any specific configuration. For
example, the :class:`ColorAttr` utilizes a default palette
for assigning color based on groups of data. If the user
doesn't assign a column of the data to the associated
attribute spec, then the default attrspec is used, which
will yield a constant color value for each group of
data. This is by default the first color in the default
palette, but can be customized by setting the default color
in the ColorAttr.
"""
default_attributes = None # None because it MUST be overridden
# Derived properties (created by Builder at runtime)
attribute_columns = List(ColumnLabel, help="""
All columns used for specifying attributes for the Chart. The Builder will set
this value on creation so that the subclasses can know the distinct set of columns
that are being used to assign attributes.
""")
comp_glyphs = List(Instance(CompositeGlyph), help="""
A list of composite glyphs, where each represents a unique subset of data. The
composite glyph is a helper class that encapsulates all low level
:class:`~bokeh.models.glyphs.Glyph`, that represent a higher level group of
data. For example, the :class:`BoxGlyph` is a single class that yields
each :class:`GlyphRenderer` needed to produce a Box on a :class:`BoxPlot`. The
single Box represents a full array of values that are aggregated, and is made
up of multiple :class:`~bokeh.models.glyphs.Rect` and
:class:`~bokeh.models.glyphs.Segment` glyphs.
""")
labels = List(String, help="""Represents the unique labels to be used for legends.""")
"""List of attributes to use for legends."""
label_attributes = []
"""
Used to assign columns to dimensions when no selections have been provided. The
default behavior is provided by the :class:`OrderedAssigner`, which assigns
a single column to each dimension available in the `Builder`'s `dims` property.
"""
column_selector = OrderedAssigner
comp_glyph_types = List(Instance(CompositeGlyph))
sort_dim = Dict(String, Bool, default={})
sort_legend = List(Tuple(String, Bool), help="""
List of tuples to use for sorting the legend, in order that they should be
used for sorting. This sorting can be different than the sorting used for the
rest of the chart. For example, you might want to sort only on the column
assigned to the color attribute, or sort it descending. The order of each tuple
is (Column, Ascending).
""")
source = Instance(ColumnDataSource)
tooltips = Either(List(Tuple(String, String)), List(String), Bool, default=None,
help="""
Tells the builder to add tooltips to the chart by either using the columns
specified to the chart attributes (True), or by generating tooltips for each
column specified (list(str)), or by explicit specification of the tooltips
using the valid input for the `HoverTool` tooltips kwarg.
""")
def __init__(self, *args, **kws):
"""Common arguments to be used by all the inherited classes.
Args:
data (:ref:`userguide_charts_data_types`): source data for the chart
legend (str, bool): the legend of your plot. The legend content is
                inferred from incoming input. It can be ``top_left``,
                ``top_right``, ``bottom_left`` or ``bottom_right``.
                It is ``top_right`` if you set it as True.
Attributes:
source (obj): datasource object for your plot,
initialized as a dummy None.
            x_range (obj): x-associated datarange object for your plot,
                initialized as a dummy None.
            y_range (obj): y-associated datarange object for your plot,
                initialized as a dummy None.
groups (list): to be filled with the incoming groups of data.
Useful for legend construction.
data (dict): to be filled with the incoming data and be passed
to the ChartDataSource for each Builder class.
attr (list(AttrSpec)): to be filled with the new attributes created after
loading the data dict.
"""
data = None
if len(args) != 0 or len(kws) != 0:
# chart dimensions can be literal dimensions or attributes
attrs = list(self.default_attributes.keys())
dims = self.dimensions + attrs
# pop the dimension inputs from kwargs
data_args = {}
for dim in dims:
if dim in kws.keys():
data_args[dim] = kws[dim]
# build chart data source from inputs, given the dimension configuration
data_args['dims'] = tuple(dims)
data_args['required_dims'] = tuple(self.req_dimensions)
data_args['attrs'] = attrs
data_args['column_assigner'] = self.column_selector
data = ChartDataSource.from_data(*args, **data_args)
# make sure that the builder dimensions have access to the chart data source
for dim in self.dimensions:
getattr(getattr(self, dim), 'set_data')(data)
# handle input attrs and ensure attrs have access to data
attributes = self._setup_attrs(data, kws)
# remove inputs handled by dimensions and chart attributes
for dim in dims:
kws.pop(dim, None)
else:
attributes = dict()
kws['attributes'] = attributes
super(Builder, self).__init__(**kws)
# collect unique columns used for attributes
self.attribute_columns = collect_attribute_columns(**self.attributes)
self._data = data
self._legends = []
def _setup_attrs(self, data, kws):
"""Handle overridden attributes and initialize them with data.
Makes sure that all attributes have access to the data
source, which is used for mapping attributes to groups
of data.
Returns:
            dict: mapping of attribute name to the configured AttrSpec
"""
source = ColumnDataSource(data.df)
attr_names = self.default_attributes.keys()
custom_palette = kws.get('palette')
attributes = dict()
for attr_name in attr_names:
attr = kws.pop(attr_name, None)
# if given an attribute use it
if isinstance(attr, AttrSpec):
attributes[attr_name] = attr
# if we are given columns, use those
elif isinstance(attr, str) or isinstance(attr, list):
attributes[attr_name] = self.default_attributes[attr_name]._clone()
# override palette if available
if isinstance(attributes[attr_name], ColorAttr):
if custom_palette is not None:
attributes[attr_name].iterable = custom_palette
attributes[attr_name].setup(data=source, columns=attr)
else:
# override palette if available
if (isinstance(self.default_attributes[attr_name], ColorAttr) and
custom_palette is not None):
attributes[attr_name] = self.default_attributes[attr_name]._clone()
attributes[attr_name].iterable = custom_palette
else:
attributes[attr_name] = self.default_attributes[attr_name]._clone()
# make sure all have access to data source
for attr_name in attr_names:
attributes[attr_name].update_data(data=source)
return attributes
def setup(self):
"""Perform any initial pre-processing, attribute config.
Returns:
None
"""
pass
def process_data(self):
"""Make any global data manipulations before grouping.
        It has to be implemented by any of the inherited classes
representing each different chart type. It is the place
where we make specific calculations for each chart.
Returns:
None
"""
pass
    def yield_renderers(self):
        """ Generator that yields the glyphs to be drawn on the plot
        It has to be implemented by any of the inherited classes
representing each different chart type.
Yields:
:class:`GlyphRenderer`
"""
        raise NotImplementedError('Subclasses of %s must implement yield_renderers.' %
self.__class__.__name__)
def set_ranges(self):
"""Calculate and set the x and y ranges.
It has to be implemented by any of the subclasses of builder
representing each different chart type, and is called after
:meth:`yield_renderers`.
Returns:
None
"""
        raise NotImplementedError('Subclasses of %s must implement set_ranges.' %
self.__class__.__name__)
def get_dim_extents(self):
"""Helper method to retrieve maximum extents of all the renderers.
Returns:
a dict mapping between dimension and value for x_max, y_max, x_min, y_min
"""
return {'x_max': max([renderer.x_max for renderer in self.comp_glyphs]),
'y_max': max([renderer.y_max for renderer in self.comp_glyphs]),
'x_min': min([renderer.x_min for renderer in self.comp_glyphs]),
'y_min': min([renderer.y_min for renderer in self.comp_glyphs])
}
def add_glyph(self, group, glyph):
"""Add a composite glyph.
Manages the legend, since the builder might not want all attribute types
used for the legend.
Args:
group (:class:`DataGroup`): the data the `glyph` is associated with
glyph (:class:`CompositeGlyph`): the glyph associated with the `group`
Returns:
None
"""
if isinstance(glyph, list):
for sub_glyph in glyph:
self.comp_glyphs.append(sub_glyph)
else:
self.comp_glyphs.append(glyph)
# handle cases where builders have specified which attributes to use for labels
label = None
if len(self.label_attributes) > 0:
for attr in self.label_attributes:
# this will get the last attribute group label for now
if self.attributes[attr].columns is not None:
label = self._get_group_label(group, attr=attr)
# if no special case for labeling, just use the group label
if label is None:
label = self._get_group_label(group, attr='label')
# add to legend if new and unique label
if str(label) not in self.labels and label is not None:
self._legends.append((label, glyph.renderers))
self.labels.append(label)
def _get_group_label(self, group, attr='label'):
"""Get the label of the group by the attribute name.
Args:
            group (:class:`DataGroup`): the group of data
attr (str, optional): the attribute name containing the label, defaults to
'label'.
Returns:
str: the label for the group
"""
        if attr == 'label':
label = group.label
else:
label = group[attr]
if isinstance(label, dict):
label = tuple(label.values())
return self._get_label(label)
@staticmethod
def _get_label(raw_label):
"""Converts a label by string or tuple to a string representation.
Args:
raw_label (str or tuple(any, any)): a unique identifier for the data group
Returns:
str: a label that is usable in charts
"""
# don't convert None type to string so we can test for it later
if raw_label is None:
return None
        if isinstance(raw_label, (tuple, list)) and len(raw_label) == 1:
raw_label = raw_label[0]
elif isinstance(raw_label, dict):
raw_label = label_from_index_dict(raw_label)
return str(raw_label)
def collect_attr_kwargs(self):
if hasattr(super(self.__class__, self), 'default_attributes'):
attrs = set(self.default_attributes.keys()) - set(
(super(self.__class__, self).default_attributes or {}).keys())
else:
attrs = set()
return attrs
def get_group_kwargs(self, group, attrs):
return {attr: group[attr] for attr in attrs}
def create(self, chart=None):
"""Builds the renderers, adding them and other components to the chart.
Args:
chart (:class:`Chart`, optional): the chart that will contain the glyph
renderers that the `Builder` produces.
Returns:
:class:`Chart`
"""
# call methods that allow customized setup by subclasses
self.setup()
self.process_data()
# create and add renderers to chart
renderers = self.yield_renderers()
if chart is None:
chart = Chart()
chart.add_renderers(self, renderers)
# handle ranges after renders, since ranges depend on aggregations
# ToDo: should reconsider where this occurs
self.set_ranges()
chart.add_ranges('x', self.x_range)
chart.add_ranges('y', self.y_range)
# always contribute legends, let Chart sort it out
chart.add_legend(self._legends)
chart.add_labels('x', self.xlabel)
chart.add_labels('y', self.ylabel)
chart.add_scales('x', self.xscale)
chart.add_scales('y', self.yscale)
if self.tooltips is not None:
tooltips = build_hover_tooltips(hover_spec=self.tooltips,
chart_cols=self.attribute_columns)
chart.add_tooltips(tooltips)
return chart
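    # Illustrative flow (a sketch only; ``MyBuilder`` stands for any concrete
    # subclass and the column names are made up):
    #
    #     builder = MyBuilder(df, x='petal_length', y='petal_width', color='species')
    #     chart = builder.create()   # setup -> process_data -> yield_renderers
    #     show(chart)                # ranges, labels, legends and tooltips are
    #                                # already attached by create()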
@classmethod
def generate_help(cls):
help_str = ''
for comp_glyph in cls.comp_glyph_types:
help_str += str(comp_glyph.glyph_properties())
return help_str
class XYBuilder(Builder):
"""Implements common functionality for XY Builders."""
x = Dimension('x')
y = Dimension('y')
dimensions = ['x', 'y']
req_dimensions = [['x'],
['y'],
['x', 'y']]
default_attributes = {'color': ColorAttr()}
def set_ranges(self):
"""Calculate and set the x and y ranges."""
# ToDo: handle when only single dimension is provided
extents = self.get_dim_extents()
endx = extents['x_max']
startx = extents['x_min']
self.x_range = self._get_range('x', startx, endx)
endy = extents['y_max']
starty = extents['y_min']
self.y_range = self._get_range('y', starty, endy)
if self.xlabel is None:
if self.x.selection is not None:
select = self.x.selection
if not isinstance(select, list):
select = [select]
else:
select = ['']
self.xlabel = ', '.join(select)
if self.ylabel is None:
if self.y.selection is not None:
select = self.y.selection
if not isinstance(select, list):
select = [select]
else:
select = ['']
self.ylabel = ', '.join(select)
# sort the legend if we are told to
if len(self.sort_legend) > 0:
for attr, asc in self.sort_legend:
if len(self.attributes[attr].columns) > 0:
item_order = self.attributes[attr].items
self._legends = list(sorted(self._legends, key=lambda leg:
item_order.index(leg[0]),
                                                reverse=not asc))
def _get_range(self, dim, start, end):
"""Create a :class:`Range` for the :class:`Chart`.
Args:
dim (str): the name of the dimension, which is an attribute of the builder
start: the starting value of the range
end: the ending value of the range
Returns:
:class:`Range`
"""
dim_ref = getattr(self, dim)
values = dim_ref.data
dtype = dim_ref.dtype.name
sort = self.sort_dim.get(dim)
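        # Choose the range type from the dimension's dtype: categorical (object)
        # data gets a FactorRange of its unique factors, while datetimes and
        # numbers get a Range1d (the numeric case padded by 10% of the span).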
# object data or single value
if dtype == 'object':
factors = values.drop_duplicates()
if sort:
# TODO (fpliger): this handles pandas API change so users do not experience
# the related annoying deprecation warning. This is probably worth
# removing when pandas deprecated version (0.16) is "old" enough
try:
factors.sort_values(inplace=True)
except AttributeError:
factors.sort(inplace=True)
setattr(self, dim + 'scale', 'categorical')
return FactorRange(factors=factors.tolist())
elif 'datetime' in dtype:
setattr(self, dim + 'scale', 'datetime')
return Range1d(start=start, end=end)
else:
if end == 'None' or (end - start) == 0:
setattr(self, dim + 'scale', 'categorical')
return FactorRange(factors=['None'])
else:
diff = end - start
setattr(self, dim + 'scale', 'linear')
return Range1d(start=start - 0.1 * diff, end=end + 0.1 * diff)
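# Illustrative sketch (not part of this module): a minimal XYBuilder subclass
# showing the yield_renderers contract. ``PointGlyph`` and the grouping call are
# assumptions modeled on the concrete builders, so treat this as pseudocode.
#
#     class PointBuilder(XYBuilder):
#
#         def yield_renderers(self):
#             for group in self._data.groupby(**self.attributes):
#                 glyph = PointGlyph(x=group.get_values(self.x.selection),
#                                    y=group.get_values(self.y.selection),
#                                    line_color=group['color'])
#                 self.add_glyph(group, glyph)
#                 for renderer in glyph.renderers:
#                     yield renderer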
class AggregateBuilder(Builder):
"""A base class for deriving specific builders performing aggregation with stats.
The typical AggregateBuilder takes a single dimension of values.
"""
values = Dimension('values')
default_attributes = {'label': CatAttr(),
'color': ColorAttr()}
| bsd-3-clause |
icdishb/scikit-learn | examples/svm/plot_weighted_samples.py | 69 | 1942 | """
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we give outliers particularly large weights, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# Fit two models: one with the modified sample weights and, for reference, one
# with no sample weights.
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
tomasreimers/tensorflow-emscripten | tensorflow/examples/learn/iris_run_config.py | 86 | 2087 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with run config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # You can define your configurations by providing a RunConfig object to
# estimator to control session configurations, e.g. num_cores
# and gpu_memory_fraction
run_config = tf.contrib.learn.estimators.RunConfig(
num_cores=3, gpu_memory_fraction=0.6)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
config=run_config)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
rc/sfepy | sfepy/postprocess/plot_quadrature.py | 4 | 3851 | """
Functions to visualize quadrature points in reference elements.
"""
import numpy as nm
import matplotlib.pyplot as plt
from sfepy.base.base import output
from sfepy.postprocess.plot_dofs import _get_axes, _to2d
from sfepy.postprocess.plot_facets import plot_geometry
def _get_qp(geometry, order):
from sfepy.discrete import Integral
from sfepy.discrete.fem.geometry_element import GeometryElement
aux = Integral('aux', order=order)
coors, weights = aux.get_qp(geometry)
true_order = aux.qps[geometry].order
output('geometry:', geometry, 'order:', order, 'num. points:',
coors.shape[0], 'true_order:', true_order)
output('min. weight:', weights.min())
output('max. weight:', weights.max())
return GeometryElement(geometry), coors, weights
def _get_bqp(geometry, order):
from sfepy.discrete import Integral
from sfepy.discrete.fem.geometry_element import GeometryElement
from sfepy.discrete.fem import Mesh, FEDomain, Field
gel = GeometryElement(geometry)
mesh = Mesh.from_data('aux', gel.coors, None,
[gel.conn[None, :]], [[0]], [geometry])
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
surf = domain.create_region('Surf', 'vertices of surface', 'facet')
field = Field.from_args('f', nm.float64, shape=1,
region=omega, approx_order=1)
field.setup_surface_data(surf)
integral = Integral('aux', order=order)
field.create_bqp('Surf', integral)
sd = field.surface_data['Surf']
qp = field.qp_coors[(integral.order, sd.bkey)]
output('geometry:', geometry, 'order:', order, 'num. points:',
qp.vals.shape[1], 'true_order:',
integral.qps[gel.surface_facet_name].order)
output('min. weight:', qp.weights.min())
output('max. weight:', qp.weights.max())
return (gel, qp.vals.reshape((-1, mesh.dim)),
nm.tile(qp.weights, qp.vals.shape[0]))
def plot_weighted_points(ax, coors, weights, min_radius=10, max_radius=50,
show_colorbar=False):
"""
Plot points with given coordinates as circles/spheres with radii given by
weights.
"""
dim = coors.shape[1]
ax = _get_axes(ax, dim)
wmin, wmax = weights.min(), weights.max()
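    # Map the weights linearly from [wmin, wmax] onto [min_radius, max_radius];
    # when all weights are (nearly) equal, fall back to scaling by the maximum.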
if (wmax - wmin) < 1e-12:
nweights = weights * max_radius / wmax
else:
nweights = ((weights - wmin) * (max_radius - min_radius)
/ (wmax - wmin) + min_radius)
coors = _to2d(coors)
sc = ax.scatter(*coors.T, s=nweights, c=weights, alpha=1)
if show_colorbar:
plt.colorbar(sc)
return ax
def label_points(ax, coors):
"""
Label points with their indices.
"""
dim = coors.shape[1]
ax = _get_axes(ax, dim)
shift = 0.02 * (coors.max(0) - coors.min(0))
ccs = coors + shift
for ic, cc in enumerate(ccs):
ax.text(*cc, s='%d' % ic, color='b')
def plot_quadrature(ax, geometry, order, boundary=False,
min_radius=10, max_radius=50,
show_colorbar=False, show_labels=False):
"""
Plot quadrature points for the given geometry and integration order.
The points are plotted as circles/spheres with radii given by quadrature
weights - the weights are mapped to [`min_radius`, `max_radius`] interval.
"""
if not boundary:
gel, coors, weights = _get_qp(geometry, order)
else:
gel, coors, weights = _get_bqp(geometry, order)
dim = coors.shape[1]
ax = _get_axes(ax, dim)
plot_geometry(ax, gel)
plot_weighted_points(ax, coors, weights,
min_radius=min_radius, max_radius=max_radius,
show_colorbar=show_colorbar)
if show_labels:
label_points(ax, coors)
return ax, coors, weights
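# Illustrative usage (a sketch only): '2_4' is one of sfepy's reference geometry
# names and an interactive matplotlib backend is assumed.
#
#     import matplotlib.pyplot as plt
#     ax, coors, weights = plot_quadrature(None, '2_4', order=3)
#     plt.show()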
| bsd-3-clause |
pushpajnc/models | creating_customer_segments/renders.py | 1 | 4134 | import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
import numpy as np
from sklearn.decomposition import pca
def pca_results(good_data, pca):
'''
Create a DataFrame of the PCA results
Includes dimension feature weights and explained variance
Visualizes the PCA results
'''
# Dimension indexing
    dimensions = ['Dimension {}'.format(i) for i in range(1, len(pca.components_) + 1)]
# PCA components
components = pd.DataFrame(np.round(pca.components_, 4), columns = good_data.keys())
components.index = dimensions
# PCA explained variance
ratios = pca.explained_variance_ratio_.reshape(len(pca.components_), 1)
variance_ratios = pd.DataFrame(np.round(ratios, 4), columns = ['Explained Variance'])
variance_ratios.index = dimensions
# Create a bar plot visualization
fig, ax = plt.subplots(figsize = (14,8))
# Plot the feature weights as a function of the components
components.plot(ax = ax, kind = 'bar');
ax.set_ylabel("Feature Weights")
ax.set_xticklabels(dimensions, rotation=0)
# Display the explained variance ratios
for i, ev in enumerate(pca.explained_variance_ratio_):
ax.text(i-0.40, ax.get_ylim()[1] + 0.05, "Explained Variance\n %.4f"%(ev))
# Return a concatenated DataFrame
return pd.concat([variance_ratios, components], axis = 1)
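# Illustrative usage (a sketch only): assumes `good_data` is the preprocessed
# DataFrame used in the project notebook and that scikit-learn is installed.
#
#     from sklearn.decomposition import PCA
#     pca = PCA(n_components=6).fit(good_data)
#     pca_results(good_data, pca)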
def cluster_results(reduced_data, preds, centers, pca_samples):
'''
Visualizes the PCA-reduced cluster data in two dimensions
Adds cues for cluster centers and student-selected sample data
'''
predictions = pd.DataFrame(preds, columns = ['Cluster'])
plot_data = pd.concat([predictions, reduced_data], axis = 1)
# Generate the cluster plot
fig, ax = plt.subplots(figsize = (14,8))
# Color map
cmap = cm.get_cmap('gist_rainbow')
# Color the points based on assigned cluster
for i, cluster in plot_data.groupby('Cluster'):
cluster.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \
color = cmap((i)*1.0/(len(centers)-1)), label = 'Cluster %i'%(i), s=30);
# Plot centers with indicators
for i, c in enumerate(centers):
ax.scatter(x = c[0], y = c[1], color = 'white', edgecolors = 'black', \
alpha = 1, linewidth = 2, marker = 'o', s=200);
ax.scatter(x = c[0], y = c[1], marker='$%d$'%(i), alpha = 1, s=100);
# Plot transformed sample points
ax.scatter(x = pca_samples[:,0], y = pca_samples[:,1], \
s = 150, linewidth = 4, color = 'black', marker = 'x');
# Set plot title
ax.set_title("Cluster Learning on PCA-Reduced Data - Centroids Marked by Number\nTransformed Sample Data Marked by Black Cross");
def channel_results(reduced_data, outliers, pca_samples):
'''
Visualizes the PCA-reduced cluster data in two dimensions using the full dataset
Data is labeled by "Channel" and cues added for student-selected sample data
'''
# Check that the dataset is loadable
try:
full_data = pd.read_csv("customers.csv")
    except Exception:
        print("Dataset could not be loaded. Is the file missing?")
return False
# Create the Channel DataFrame
channel = pd.DataFrame(full_data['Channel'], columns = ['Channel'])
channel = channel.drop(channel.index[outliers]).reset_index(drop = True)
labeled = pd.concat([reduced_data, channel], axis = 1)
# Generate the cluster plot
fig, ax = plt.subplots(figsize = (14,8))
# Color map
cmap = cm.get_cmap('gist_rainbow')
# Color the points based on assigned Channel
labels = ['Hotel/Restaurant/Cafe', 'Retailer']
grouped = labeled.groupby('Channel')
for i, channel in grouped:
channel.plot(ax = ax, kind = 'scatter', x = 'Dimension 1', y = 'Dimension 2', \
color = cmap((i-1)*1.0/2), label = labels[i-1], s=30);
# Plot transformed sample points
for i, sample in enumerate(pca_samples):
ax.scatter(x = sample[0], y = sample[1], \
s = 200, linewidth = 3, color = 'black', marker = 'o', facecolors = 'none');
ax.scatter(x = sample[0]+0.25, y = sample[1]+0.3, marker='$%d$'%(i), alpha = 1, s=125);
# Set plot title
ax.set_title("PCA-Reduced Data Labeled by 'Channel'\nTransformed Sample Data Circled");
| mit |
aaltay/beam | sdks/python/apache_beam/dataframe/doctests.py | 6 | 24708 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module that allows running existing pandas doctests with Beam dataframes.
This module hooks into the doctesting framework by providing a custom
runner and, in particular, an OutputChecker, as well as providing a fake
object for mocking out the pandas module.
The (novel) sequence of events when running a doctest is as follows.
1. The test invokes `pd.DataFrame(...)` (or similar) and an actual dataframe
is computed and stashed but a Beam deferred dataframe is returned
in its place.
2. Computations are done on these "dataframes," resulting in new objects,
but as these are actually deferred, only expression trees are built.
In the background, a mapping of id -> deferred dataframe is stored for
each newly created dataframe.
3. When any dataframe is printed out, the repr has been overwritten to
   print `DeferredBase[id]`. The aforementioned mapping is used to map this back
   to the actual dataframe object, which is then computed via Beam, and its
   (stringified) result is plugged into the actual output for comparison.
4. The comparison is then done on the sorted lines of the expected and actual
values.
"""
import collections
import contextlib
import doctest
import re
import sys
import traceback
from io import StringIO
from typing import Any
from typing import Dict
from typing import List
import numpy as np
import pandas as pd
import apache_beam as beam
from apache_beam.dataframe import expressions
from apache_beam.dataframe import frames # pylint: disable=unused-import
from apache_beam.dataframe import pandas_top_level_functions
from apache_beam.dataframe import transforms
from apache_beam.dataframe.frame_base import DeferredBase
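# An illustrative sketch of the round-trip described in the module docstring
# (the id value is made up; the names mirror the classes defined below):
#
#     df = fake_pd.DataFrame({'a': [1, 2, 3]})  # returns a DeferredBase; the
#                                               # real frame is stashed in
#                                               # env._inputs
#     repr(df)                                  # -> 'DeferredBase[140223378]'
#                                               # (within the patched context)
#     # The output checker finds that token, looks the id up in env._all_frames,
#     # computes the deferred expression (via Beam or PartitioningSession), and
#     # splices the stringified result into the doctest output before comparing.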
class FakePandasObject(object):
"""A stand-in for the wrapped pandas objects.
"""
def __init__(self, pandas_obj, test_env):
self._pandas_obj = pandas_obj
self._test_env = test_env
def __call__(self, *args, **kwargs):
result = self._pandas_obj(*args, **kwargs)
if type(result) in DeferredBase._pandas_type_map.keys():
placeholder = expressions.PlaceholderExpression(result.iloc[0:0])
self._test_env._inputs[placeholder] = result
return DeferredBase.wrap(placeholder)
else:
return result
def __getattr__(self, name):
attr = getattr(self._pandas_obj, name)
if callable(attr):
result = FakePandasObject(attr, self._test_env)
else:
result = attr
# Cache this so two lookups return the same object.
setattr(self, name, result)
return result
def __reduce__(self):
return lambda: pd, ()
class TestEnvironment(object):
"""A class managing the patching (of methods, inputs, and outputs) needed
to run and validate tests.
  The DeferredBase types are patched to be able to recognize and retrieve inputs
  and results, stored in `self._inputs` and `self._all_frames` respectively.
"""
def __init__(self):
self._inputs = {}
self._all_frames = {}
def fake_pandas_module(self):
return FakePandasObject(pandas_top_level_functions.pd_wrapper, self)
@contextlib.contextmanager
def _monkey_patch_type(self, deferred_type):
"""Monkey-patch __init__ to record a pointer to all created frames, and
__repr__ to be able to recognize them in the doctest output.
"""
try:
old_init, old_repr = deferred_type.__init__, deferred_type.__repr__
def new_init(df, *args, **kwargs):
old_init(df, *args, **kwargs)
self._all_frames[id(df)] = df
deferred_type.__init__ = new_init
deferred_type.__repr__ = lambda self: 'DeferredBase[%s]' % id(self)
self._recorded_results = collections.defaultdict(list)
yield
finally:
deferred_type.__init__, deferred_type.__repr__ = old_init, old_repr
@contextlib.contextmanager
def context(self):
"""Creates a context within which DeferredBase types are monkey patched
to record ids."""
with contextlib.ExitStack() as stack:
for deferred_type in DeferredBase._pandas_type_map.values():
stack.enter_context(self._monkey_patch_type(deferred_type))
yield
class _InMemoryResultRecorder(object):
"""Helper for extracting computed results from a Beam pipeline.
Used as follows::
with _InMemoryResultRecorder() as recorder:
with beam.Pipeline() as p:
...
pcoll | beam.Map(recorder.record_fn(name))
seen = recorder.get_recorded(name)
"""
# Class-level value to survive pickling.
_ALL_RESULTS = {} # type: Dict[str, List[Any]]
def __init__(self):
self._id = id(self)
def __enter__(self):
self._ALL_RESULTS[self._id] = collections.defaultdict(list)
return self
def __exit__(self, *unused_args):
del self._ALL_RESULTS[self._id]
def record_fn(self, name):
def record(value):
self._ALL_RESULTS[self._id][name].append(value)
return record
def get_recorded(self, name):
return self._ALL_RESULTS[self._id][name]
WONT_IMPLEMENT = 'apache_beam.dataframe.frame_base.WontImplementError'
NOT_IMPLEMENTED = 'NotImplementedError'
class _DeferrredDataframeOutputChecker(doctest.OutputChecker):
"""Validates output by replacing DeferredBase[...] with computed values.
"""
def __init__(self, env, use_beam):
self._env = env
if use_beam:
self.compute = self.compute_using_beam
else:
self.compute = self.compute_using_session
self.reset()
def reset(self):
self._last_error = None
def compute_using_session(self, to_compute):
session = expressions.PartitioningSession(self._env._inputs)
return {
name: session.evaluate(frame._expr)
for name,
frame in to_compute.items()
}
def compute_using_beam(self, to_compute):
with _InMemoryResultRecorder() as recorder:
with beam.Pipeline() as p:
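        # Feed each placeholder's input as two partitions (even and odd rows) so
        # the test exercises genuinely distributed evaluation of the expressions.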
input_pcolls = {
placeholder: p
| 'Create%s' % placeholder >> beam.Create([input[::2], input[1::2]])
for placeholder,
input in self._env._inputs.items()
}
output_pcolls = (
input_pcolls | transforms._DataframeExpressionsTransform(
{name: frame._expr
for name, frame in to_compute.items()}))
for name, output_pcoll in output_pcolls.items():
_ = output_pcoll | 'Record%s' % name >> beam.FlatMap(
recorder.record_fn(name))
# pipeline runs, side effects recorded
def concat(values):
if len(values) > 1:
return pd.concat(values)
else:
return values[0]
return {
name: concat(recorder.get_recorded(name))
for name in to_compute.keys()
}
def fix(self, want, got):
if 'DeferredBase' in got:
try:
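        # Map every 'DeferredBase[<id>]' token in the raw output back to the
        # deferred frame recorded for that id, then compute its real value.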
to_compute = {
m.group(0): self._env._all_frames[int(m.group(1))]
for m in re.finditer(r'DeferredBase\[(\d+)\]', got)
}
computed = self.compute(to_compute)
for name, frame in computed.items():
got = got.replace(name, repr(frame))
# If a multiindex is used, compensate for it
if any(isinstance(frame, pd.core.generic.NDFrame) and
frame.index.nlevels > 1 for frame in computed.values()):
def fill_multiindex(text):
"""An awful hack to work around the fact that pandas omits repeated
elements in a multi-index.
For example:
Series name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
The s1 and s2 are implied for the 2nd and 4th rows. However if we
re-order this Series it might be printed this way:
Series name Row ID
s1 0 a
s2 1 d
s2 0 c
s1 1 b
dtype: object
In our model these are equivalent, but when we sort the lines and
check equality they are not. This method fills in any omitted
multiindex values, so that we can successfully sort and compare."""
lines = [list(line) for line in text.split('\n')]
for prev, line in zip(lines[:-1], lines[1:]):
if all(l == ' ' for l in line):
continue
for i, l in enumerate(line):
if l != ' ':
break
line[i] = prev[i]
return '\n'.join(''.join(line) for line in lines)
got = fill_multiindex(got)
want = fill_multiindex(want)
def sort_and_normalize(text):
return '\n'.join(
sorted(
[line.rstrip() for line in text.split('\n') if line.strip()],
key=str.strip)) + '\n'
got = sort_and_normalize(got)
want = sort_and_normalize(want)
except Exception:
got = traceback.format_exc()
return want, got
@property
def _seen_error(self):
return self._last_error is not None
def check_output(self, want, got, optionflags):
# When an error occurs check_output is called with want=example.exc_msg,
# and got=exc_msg
# First check if `want` is a special string indicating wont_implement_ok
# and/or not_implemented_ok
allowed_exceptions = want.split('|')
if all(exc in (WONT_IMPLEMENT, NOT_IMPLEMENTED)
for exc in allowed_exceptions):
# If it is, check for WontImplementError and NotImplementedError
if WONT_IMPLEMENT in allowed_exceptions and got.startswith(
WONT_IMPLEMENT):
self._last_error = WONT_IMPLEMENT
return True
elif NOT_IMPLEMENTED in allowed_exceptions and got.startswith(
NOT_IMPLEMENTED):
self._last_error = NOT_IMPLEMENTED
return True
elif got.startswith('NameError') and self._seen_error:
# This allows us to gracefully skip tests like
# >>> res = df.unsupported_operation()
# >>> check(res)
return True
self.reset()
want, got = self.fix(want, got)
return super(_DeferrredDataframeOutputChecker,
self).check_output(want, got, optionflags)
def output_difference(self, example, got, optionflags):
want, got = self.fix(example.want, got)
if want != example.want:
example = doctest.Example(
example.source,
want,
example.exc_msg,
example.lineno,
example.indent,
example.options)
return super(_DeferrredDataframeOutputChecker,
self).output_difference(example, got, optionflags)
class BeamDataframeDoctestRunner(doctest.DocTestRunner):
"""A Doctest runner suitable for replacing the `pd` module with one backed
by beam.
"""
def __init__(
self,
env,
use_beam=True,
wont_implement_ok=None,
not_implemented_ok=None,
skip=None,
**kwargs):
self._test_env = env
def to_callable(cond):
if cond == '*':
return lambda example: True
else:
return lambda example: example.source.strip() == cond
self._wont_implement_ok = {
test: [to_callable(cond) for cond in examples]
for test,
examples in (wont_implement_ok or {}).items()
}
self._not_implemented_ok = {
test: [to_callable(cond) for cond in examples]
for test,
examples in (not_implemented_ok or {}).items()
}
self._skip = {
test: [to_callable(cond) for cond in examples]
for test,
examples in (skip or {}).items()
}
super(BeamDataframeDoctestRunner, self).__init__(
checker=_DeferrredDataframeOutputChecker(self._test_env, use_beam),
**kwargs)
self.success = 0
self.skipped = 0
self._reasons = collections.defaultdict(list)
self._skipped_set = set()
def _is_wont_implement_ok(self, example, test):
return any(
wont_implement(example)
for wont_implement in self._wont_implement_ok.get(test.name, []))
def _is_not_implemented_ok(self, example, test):
return any(
not_implemented(example)
for not_implemented in self._not_implemented_ok.get(test.name, []))
def run(self, test, **kwargs):
self._checker.reset()
for example in test.examples:
if any(should_skip(example)
for should_skip in self._skip.get(test.name, [])):
self._skipped_set.add(example)
example.source = 'pass'
example.want = ''
self.skipped += 1
elif example.exc_msg is None:
allowed_exceptions = []
if self._is_not_implemented_ok(example, test):
allowed_exceptions.append(NOT_IMPLEMENTED)
if self._is_wont_implement_ok(example, test):
allowed_exceptions.append(WONT_IMPLEMENT)
if len(allowed_exceptions):
# Don't fail doctests that raise this error.
example.exc_msg = '|'.join(allowed_exceptions)
with self._test_env.context():
result = super(BeamDataframeDoctestRunner, self).run(test, **kwargs)
# Can't add attributes to builtin result.
result = AugmentedTestResults(result.failed, result.attempted)
result.summary = self.summary()
return result
def report_success(self, out, test, example, got):
def extract_concise_reason(got, expected_exc):
m = re.search(r"Implement(?:ed)?Error:\s+(.*)\n$", got)
if m:
return m.group(1)
elif "NameError" in got:
return "NameError following %s" % expected_exc
elif re.match(r"DeferredBase\[\d+\]\n", got):
return "DeferredBase[*]"
else:
return got.replace("\n", "\\n")
if self._checker._last_error is not None:
self._reasons[self._checker._last_error].append(
extract_concise_reason(got, self._checker._last_error))
if self._checker._seen_error:
m = re.search('^([a-zA-Z0-9_, ]+)=', example.source)
if m:
for var in m.group(1).split(','):
var = var.strip()
if var in test.globs:
# More informative to get a NameError than
# use the wrong previous value.
del test.globs[var]
return super(BeamDataframeDoctestRunner,
self).report_success(out, test, example, got)
def fake_pandas_module(self):
return self._test_env.fake_pandas_module()
def summarize(self):
super(BeamDataframeDoctestRunner, self).summarize()
self.summary().summarize()
def summary(self):
return Summary(self.failures, self.tries, self.skipped, self._reasons)
class AugmentedTestResults(doctest.TestResults):
pass
class Summary(object):
def __init__(self, failures=0, tries=0, skipped=0, error_reasons=None):
self.failures = failures
self.tries = tries
self.skipped = skipped
self.error_reasons = error_reasons or collections.defaultdict(list)
def result(self):
res = AugmentedTestResults(self.failures, self.tries)
res.summary = self
return res
def __add__(self, other):
merged_reasons = {
key: self.error_reasons.get(key, []) + other.error_reasons.get(key, [])
for key in set(self.error_reasons.keys()).union(
other.error_reasons.keys())
}
return Summary(
self.failures + other.failures,
self.tries + other.tries,
self.skipped + other.skipped,
merged_reasons)
def summarize(self):
def print_partition(indent, desc, n, total):
print("%s%d %s (%.1f%%)" % (" " * indent, n, desc, n / total * 100))
print()
print("%d total test cases:" % self.tries)
if not self.tries:
return
print_partition(1, "skipped", self.skipped, self.tries)
for error, reasons in self.error_reasons.items():
print_partition(1, error, len(reasons), self.tries)
reason_counts = sorted(
collections.Counter(reasons).items(),
key=lambda x: x[1],
reverse=True)
for desc, count in reason_counts:
print_partition(2, desc, count, len(reasons))
print_partition(1, "failed", self.failures, self.tries)
print_partition(
1,
"passed",
self.tries - self.skipped -
sum(len(reasons)
for reasons in self.error_reasons.values()) - self.failures,
self.tries)
print()
def parse_rst_ipython_tests(rst, name, extraglobs=None, optionflags=None):
  """Extracts examples from an rst file and produces a test suite by running
  them through pandas to get the expected outputs.
"""
# Optional dependency.
import IPython
from traitlets.config import Config
def get_indent(line):
return len(line) - len(line.lstrip())
def is_example_line(line):
line = line.strip()
return line and not line.startswith('#') and not line[0] == line[-1] == ':'
IMPORT_PANDAS = 'import pandas as pd'
example_srcs = []
lines = iter([(lineno, line.rstrip()) for lineno,
line in enumerate(rst.split('\n')) if is_example_line(line)] +
[(None, 'END')])
# https://ipython.readthedocs.io/en/stable/sphinxext.html
lineno, line = next(lines)
while True:
if line == 'END':
break
if line.startswith('.. ipython::'):
lineno, line = next(lines)
indent = get_indent(line)
example = []
example_srcs.append((lineno, example))
while get_indent(line) >= indent:
if '@verbatim' in line or ':verbatim:' in line or '@savefig' in line:
example_srcs.pop()
break
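        # Strip IPython input prompts ('In [n]: ') and continuation markers
        # ('...:') so only the bare source line remains.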
line = re.sub(r'In \[\d+\]: ', '', line)
line = re.sub(r'\.\.\.+:', '', line)
example.append(line[indent:])
lineno, line = next(lines)
if get_indent(line) == indent and line[indent] not in ')]}':
example = []
example_srcs.append((lineno, example))
else:
lineno, line = next(lines)
# TODO(robertwb): Would it be better to try and detect/compare the actual
# objects in two parallel sessions than make (stringified) doctests?
examples = []
config = Config()
config.HistoryManager.hist_file = ':memory:'
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
set_pandas_options()
IP = IPython.InteractiveShell.instance(config=config)
IP.run_cell(IMPORT_PANDAS + '\n')
IP.run_cell('import numpy as np\n')
try:
stdout = sys.stdout
for lineno, src in example_srcs:
sys.stdout = cout = StringIO()
src = '\n'.join(src)
if src == IMPORT_PANDAS:
continue
IP.run_cell(src + '\n')
output = cout.getvalue()
if output:
# Strip the prompt.
# TODO(robertwb): Figure out how to suppress this.
output = re.sub(r'^Out\[\d+\]:[ \t]*\n?', '', output)
examples.append(doctest.Example(src, output, lineno=lineno))
finally:
sys.stdout = stdout
return doctest.DocTest(
examples, dict(extraglobs or {}, np=np), name, name, None, None)
def test_rst_ipython(
rst,
name,
report=False,
wont_implement_ok=(),
not_implemented_ok=(),
skip=(),
    **kwargs):
  """Extracts examples from an rst file and runs them through pandas to get the
  expected output, and then compares them against our dataframe implementation.
"""
def run_tests(extraglobs, optionflags, **kwargs):
# The patched one.
tests = parse_rst_ipython_tests(rst, name, extraglobs, optionflags)
runner = doctest.DocTestRunner(optionflags=optionflags)
set_pandas_options()
result = runner.run(tests, **kwargs)
if report:
runner.summarize()
return result
result = _run_patched(
run_tests,
wont_implement_ok={name: wont_implement_ok},
not_implemented_ok={name: not_implemented_ok},
skip={name: skip},
**kwargs)
return result
def teststring(text, wont_implement_ok=None, not_implemented_ok=None, **kwargs):
return teststrings(
{'<string>': text},
wont_implement_ok={'<string>': ['*']} if wont_implement_ok else None,
not_implemented_ok={'<string>': ['*']} if not_implemented_ok else None,
**kwargs)
def teststrings(texts, report=False, **runner_kwargs):
optionflags = runner_kwargs.pop('optionflags', 0)
optionflags |= (
doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL)
parser = doctest.DocTestParser()
runner = BeamDataframeDoctestRunner(
TestEnvironment(), optionflags=optionflags, **runner_kwargs)
globs = {
'pd': runner.fake_pandas_module(),
'np': np,
'option_context': pd.option_context,
}
with expressions.allow_non_parallel_operations():
for name, text in texts.items():
test = parser.get_doctest(text, globs, name, name, 0)
runner.run(test)
if report:
runner.summarize()
return runner.summary().result()
def set_pandas_options():
# See
# https://github.com/pandas-dev/pandas/blob/a00202d12d399662b8045a8dd3fdac04f18e1e55/doc/source/conf.py#L319
np.random.seed(123456)
np.set_printoptions(precision=4, suppress=True)
pd.options.display.max_rows = 15
def _run_patched(func, *args, **kwargs):
set_pandas_options()
# https://github.com/pandas-dev/pandas/blob/1.0.x/setup.cfg#L63
optionflags = kwargs.pop('optionflags', 0)
optionflags |= (
doctest.NORMALIZE_WHITESPACE | doctest.IGNORE_EXCEPTION_DETAIL)
env = TestEnvironment()
use_beam = kwargs.pop('use_beam', True)
skip = kwargs.pop('skip', {})
wont_implement_ok = kwargs.pop('wont_implement_ok', {})
not_implemented_ok = kwargs.pop('not_implemented_ok', {})
extraglobs = dict(kwargs.pop('extraglobs', {}))
extraglobs['pd'] = env.fake_pandas_module()
try:
# Unfortunately the runner is not injectable.
original_doc_test_runner = doctest.DocTestRunner
doctest.DocTestRunner = lambda **kwargs: BeamDataframeDoctestRunner(
env,
use_beam=use_beam,
wont_implement_ok=wont_implement_ok,
not_implemented_ok=not_implemented_ok,
skip=skip,
**kwargs)
with expressions.allow_non_parallel_operations():
return func(
*args, extraglobs=extraglobs, optionflags=optionflags, **kwargs)
finally:
doctest.DocTestRunner = original_doc_test_runner
def with_run_patched_docstring(target=None):
assert target is not None
def wrapper(fn):
fn.__doc__ = f"""Run all pandas doctests in the specified {target}.
Arguments `skip`, `wont_implement_ok`, `not_implemented_ok` are all in the
format::
{{
"module.Class.method": ['*'],
"module.Class.other_method": [
'instance.other_method(bad_input)',
'observe_result_of_bad_input()',
],
}}
`'*'` indicates all examples should be matched, otherwise the list is a list
of specific input strings that should be matched.
All arguments are kwargs.
Args:
optionflags (int): Passed through to doctests.
extraglobs (Dict[str,Any]): Passed through to doctests.
use_beam (bool): If true, run a Beam pipeline with partitioned input to
verify the examples, else use PartitioningSession to simulate
distributed execution.
skip (Dict[str,str]): A set of examples to skip entirely.
wont_implement_ok (Dict[str,str]): A set of examples that are allowed to
raise WontImplementError.
not_implemented_ok (Dict[str,str]): A set of examples that are allowed to
raise NotImplementedError.
Returns:
~doctest.TestResults: A doctest result describing the passed/failed tests.
"""
return fn
return wrapper
@with_run_patched_docstring(target="file")
def testfile(*args, **kwargs):
return _run_patched(doctest.testfile, *args, **kwargs)
@with_run_patched_docstring(target="module")
def testmod(*args, **kwargs):
return _run_patched(doctest.testmod, *args, **kwargs)
| apache-2.0 |
anirudhjayaraman/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
belteshassar/cartopy | lib/cartopy/tests/mpl/test_patch.py | 3 | 2096 | # (C) British Crown Copyright 2015 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import six
import unittest
from matplotlib.path import Path
import shapely.geometry as sgeom
import cartopy.mpl.patch as cpatch
class Test_path_to_geos(unittest.TestCase):
def test_empty_polyon(self):
p = Path([[0, 0], [0, 0], [0, 0], [0, 0],
[1, 2], [1, 2], [1, 2], [1, 2]],
codes=[1, 2, 2, 79,
1, 2, 2, 79])
geoms = cpatch.path_to_geos(p)
self.assertEqual(list(type(geom) for geom in geoms),
[sgeom.Point, sgeom.Point])
self.assertEqual(len(geoms), 2)
def test_polygon_with_interior_and_singularity(self):
# A geometry with two interiors, one a single point.
p = Path([[0, -90], [200, -40], [200, 40], [0, 40], [0, -90],
[126, 26], [126, 26], [126, 26], [126, 26], [126, 26],
[114, 5], [103, 8], [126, 12], [126, 0], [114, 5]],
codes=[1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2])
geoms = cpatch.path_to_geos(p)
self.assertEqual(list(type(geom) for geom in geoms),
[sgeom.Polygon, sgeom.Point])
self.assertEqual(len(geoms[0].interiors), 1)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| gpl-3.0 |
Garrett-R/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
louispotok/pandas | pandas/tests/indexing/test_floats.py | 3 | 28344 | # -*- coding: utf-8 -*-
import pytest
from warnings import catch_warnings
import numpy as np
from pandas import (Series, DataFrame, Index, Float64Index, Int64Index,
RangeIndex)
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
class TestFloatIndexers(object):
def check(self, result, original, indexer, getitem):
"""
        Comparator for results.
        We need to take care whether we are indexing on a
        Series or a DataFrame.
"""
if isinstance(original, Series):
expected = original.iloc[indexer]
else:
if getitem:
expected = original.iloc[:, indexer]
else:
expected = original.iloc[indexer]
assert_almost_equal(result, expected)
def test_scalar_error(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
# this duplicates the code below
        # but is specifically testing for the error
# message
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex, tm.makeIntIndex,
tm.makeRangeIndex]:
i = index(5)
s = Series(np.arange(len(i)), index=i)
def f():
s.iloc[3.0]
tm.assert_raises_regex(TypeError,
'cannot do positional indexing',
f)
def f():
s.iloc[3.0] = 0
pytest.raises(TypeError, f)
def test_scalar_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeCategoricalIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]:
i = index(5)
for s in [Series(
np.arange(len(i)), index=i), DataFrame(
np.random.randn(
len(i), len(i)), index=i, columns=i)]:
# getting
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.iloc, False),
(lambda x: x, True)]:
def f():
with catch_warnings(record=True):
idxr(s)[3.0]
                # getitem on a DataFrame is a KeyError as it is indexing
# via labels on the columns
if getitem and isinstance(s, DataFrame):
error = KeyError
else:
error = TypeError
pytest.raises(error, f)
# label based can be a TypeError or KeyError
def f():
s.loc[3.0]
if s.index.inferred_type in ['string', 'unicode', 'mixed']:
error = KeyError
else:
error = TypeError
pytest.raises(error, f)
# contains
assert 3.0 not in s
# setting with a float fails with iloc
def f():
s.iloc[3.0] = 0
pytest.raises(TypeError, f)
# setting with an indexer
if s.index.inferred_type in ['categorical']:
# Value or Type Error
pass
elif s.index.inferred_type in ['datetime64', 'timedelta64',
'period']:
# these should prob work
                # and are inconsistent between series/dataframe ATM
# for idxr in [lambda x: x.ix,
# lambda x: x]:
# s2 = s.copy()
# def f():
# idxr(s2)[3.0] = 0
# pytest.raises(TypeError, f)
pass
else:
s2 = s.copy()
s2.loc[3.0] = 10
assert s2.index.is_object()
for idxr in [lambda x: x.ix,
lambda x: x]:
s2 = s.copy()
with catch_warnings(record=True):
idxr(s2)[3.0] = 0
assert s2.index.is_object()
            # falls back to position selection, series only
s = Series(np.arange(len(i)), index=i)
s[3]
pytest.raises(TypeError, lambda: s[3.0])
def test_scalar_with_mixed(self):
s2 = Series([1, 2, 3], index=['a', 'b', 'c'])
s3 = Series([1, 2, 3], index=['a', 'b', 1.5])
# lookup in a pure string index
# with an invalid indexer
for idxr in [lambda x: x.ix,
lambda x: x,
lambda x: x.iloc]:
def f():
with catch_warnings(record=True):
idxr(s2)[1.0]
pytest.raises(TypeError, f)
pytest.raises(KeyError, lambda: s2.loc[1.0])
result = s2.loc['b']
expected = 2
assert result == expected
# mixed index so we have label
# indexing
for idxr in [lambda x: x]:
def f():
idxr(s3)[1.0]
pytest.raises(TypeError, f)
result = idxr(s3)[1]
expected = 2
assert result == expected
# mixed index so we have label
# indexing
for idxr in [lambda x: x.ix]:
with catch_warnings(record=True):
def f():
idxr(s3)[1.0]
pytest.raises(TypeError, f)
result = idxr(s3)[1]
expected = 2
assert result == expected
pytest.raises(TypeError, lambda: s3.iloc[1.0])
pytest.raises(KeyError, lambda: s3.loc[1.0])
result = s3.loc[1.5]
expected = 3
assert result == expected
def test_scalar_integer(self):
# test how scalar float indexers work on int indexes
# integer index
for i in [Int64Index(range(5)), RangeIndex(5)]:
for s in [Series(np.arange(len(i))),
DataFrame(np.random.randn(len(i), len(i)),
index=i, columns=i)]:
# coerce to equal int
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
with catch_warnings(record=True):
result = idxr(s)[3.0]
self.check(result, s, 3, getitem)
# coerce to equal int
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
if isinstance(s, Series):
def compare(x, y):
assert x == y
expected = 100
else:
compare = tm.assert_series_equal
if getitem:
expected = Series(100,
index=range(len(s)), name=3)
else:
expected = Series(100.,
index=range(len(s)), name=3)
s2 = s.copy()
with catch_warnings(record=True):
idxr(s2)[3.0] = 100
result = idxr(s2)[3.0]
compare(result, expected)
result = idxr(s2)[3]
compare(result, expected)
# contains
# coerce to equal int
assert 3.0 in s
def test_scalar_float(self):
# scalar float indexers work on a float index
index = Index(np.arange(5.))
for s in [Series(np.arange(len(index)), index=index),
DataFrame(np.random.randn(len(index), len(index)),
index=index, columns=index)]:
# assert all operations except for iloc are ok
indexer = index[3]
for idxr, getitem in [(lambda x: x.ix, False),
(lambda x: x.loc, False),
(lambda x: x, True)]:
# getting
with catch_warnings(record=True):
result = idxr(s)[indexer]
self.check(result, s, 3, getitem)
# setting
s2 = s.copy()
def f():
with catch_warnings(record=True):
idxr(s2)[indexer] = expected
with catch_warnings(record=True):
result = idxr(s2)[indexer]
self.check(result, s, 3, getitem)
# random integer is a KeyError
with catch_warnings(record=True):
pytest.raises(KeyError, lambda: idxr(s)[3.5])
# contains
assert 3.0 in s
# iloc succeeds with an integer
expected = s.iloc[3]
s2 = s.copy()
s2.iloc[3] = expected
result = s2.iloc[3]
self.check(result, s, 3, False)
# iloc raises with a float
pytest.raises(TypeError, lambda: s.iloc[3.0])
def g():
s2.iloc[3.0] = 0
pytest.raises(TypeError, g)
def test_slice_non_numeric(self):
# GH 4892
# float_indexers should raise exceptions
# on appropriate Index types & accessors
for index in [tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]:
index = index(5)
for s in [Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index)]:
# getitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
def f():
s.iloc[l]
pytest.raises(TypeError, f)
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x.iloc,
lambda x: x]:
def f():
with catch_warnings(record=True):
idxr(s)[l]
pytest.raises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
def f():
s.iloc[l] = 0
pytest.raises(TypeError, f)
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x.iloc,
lambda x: x]:
def f():
with catch_warnings(record=True):
idxr(s)[l] = 0
pytest.raises(TypeError, f)
def test_slice_integer(self):
# same as above, but for Integer based indexes
# these coerce to a like integer
# oob indicates if we are out of bounds
# of positional indexing
for index, oob in [(Int64Index(range(5)), False),
(RangeIndex(5), False),
(Int64Index(range(5)) + 10, True)]:
# s is an in-range index
s = Series(range(5), index=index)
# getitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
with catch_warnings(record=True):
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(3, 5)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# getitem out-of-bounds
for l in [slice(-6, 6),
slice(-6.0, 6.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
with catch_warnings(record=True):
result = idxr(s)[l]
# these are all label indexing
# except getitem which is positional
# empty
if oob:
indexer = slice(0, 0)
else:
indexer = slice(-6, 6)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[slice(-6.0, 6.0)]
pytest.raises(TypeError, f)
# getitem odd floats
for l, res1 in [(slice(2.5, 4), slice(3, 5)),
(slice(2, 3.5), slice(2, 4)),
(slice(2.5, 3.5), slice(3, 4))]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
with catch_warnings(record=True):
result = idxr(s)[l]
if oob:
res = slice(0, 0)
else:
res = res1
self.check(result, s, res, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
for idxr in [lambda x: x.loc,
lambda x: x.ix]:
sc = s.copy()
with catch_warnings(record=True):
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
assert (result == 0).all()
# positional indexing
def f():
s[l] = 0
pytest.raises(TypeError, f)
def test_integer_positional_indexing(self):
""" make sure that we are raising on positional indexing
w.r.t. an integer index """
s = Series(range(2, 6), index=range(2, 6))
result = s[2:4]
expected = s.iloc[2:4]
assert_series_equal(result, expected)
for idxr in [lambda x: x,
lambda x: x.iloc]:
for l in [slice(2, 4.0),
slice(2.0, 4),
slice(2.0, 4.0)]:
def f():
idxr(s)[l]
pytest.raises(TypeError, f)
def test_slice_integer_frame_getitem(self):
# similar to above, but on the getitem dim (of a DataFrame)
for index in [Int64Index(range(5)), RangeIndex(5)]:
s = DataFrame(np.random.randn(5, 2), index=index)
def f(idxr):
# getitem
for l in [slice(0.0, 1),
slice(0, 1.0),
slice(0.0, 1.0)]:
result = idxr(s)[l]
indexer = slice(0, 2)
self.check(result, s, indexer, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# getitem out-of-bounds
for l in [slice(-10, 10),
slice(-10.0, 10.0)]:
result = idxr(s)[l]
self.check(result, s, slice(-10, 10), True)
# positional indexing
def f():
s[slice(-10.0, 10.0)]
pytest.raises(TypeError, f)
# getitem odd floats
for l, res in [(slice(0.5, 1), slice(1, 2)),
(slice(0, 0.5), slice(0, 1)),
(slice(0.5, 1.5), slice(1, 2))]:
result = idxr(s)[l]
self.check(result, s, res, False)
# positional indexing
def f():
s[l]
pytest.raises(TypeError, f)
# setitem
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
sc = s.copy()
idxr(sc)[l] = 0
result = idxr(sc)[l].values.ravel()
assert (result == 0).all()
# positional indexing
def f():
s[l] = 0
pytest.raises(TypeError, f)
f(lambda x: x.loc)
with catch_warnings(record=True):
f(lambda x: x.ix)
def test_slice_float(self):
# same as above, but for floats
index = Index(np.arange(5.)) + 0.1
for s in [Series(range(5), index=index),
DataFrame(np.random.randn(5, 2), index=index)]:
for l in [slice(3.0, 4),
slice(3, 4.0),
slice(3.0, 4.0)]:
expected = s.iloc[3:4]
for idxr in [lambda x: x.ix,
lambda x: x.loc,
lambda x: x]:
# getitem
with catch_warnings(record=True):
result = idxr(s)[l]
if isinstance(s, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
# setitem
s2 = s.copy()
with catch_warnings(record=True):
idxr(s2)[l] = 0
result = idxr(s2)[l].values.ravel()
assert (result == 0).all()
def test_floating_index_doc_example(self):
index = Index([1.5, 2, 3, 4.5, 5])
s = Series(range(5), index=index)
assert s[3] == 2
assert s.loc[3] == 2
assert s.loc[3] == 2
assert s.iloc[3] == 3
def test_floating_misc(self):
# related 236
# scalar/slicing of a float index
s = Series(np.arange(5), index=np.arange(5) * 2.5, dtype=np.int64)
# label based slicing
result1 = s[1.0:3.0]
result2 = s.loc[1.0:3.0]
result3 = s.loc[1.0:3.0]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# exact indexing when found
result1 = s[5.0]
result2 = s.loc[5.0]
result3 = s.loc[5.0]
assert result1 == result2
assert result1 == result3
result1 = s[5]
result2 = s.loc[5]
result3 = s.loc[5]
assert result1 == result2
assert result1 == result3
assert s[5.0] == s[5]
# value not found (and no fallbacking at all)
# scalar integers
pytest.raises(KeyError, lambda: s.loc[4])
pytest.raises(KeyError, lambda: s.loc[4])
pytest.raises(KeyError, lambda: s[4])
# fancy floats/integers create the correct entry (as nan)
# fancy tests
expected = Series([2, 0], index=Float64Index([5.0, 0.0]))
for fancy_idx in [[5.0, 0.0], np.array([5.0, 0.0])]: # float
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
expected = Series([2, 0], index=Index([5, 0], dtype='int64'))
for fancy_idx in [[5, 0], np.array([5, 0])]: # int
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
# all should return the same as we are slicing 'the same'
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# previously this did fallback indexing
result1 = s[2:5]
result2 = s[2.0:5.0]
result3 = s[2.0:5]
result4 = s[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# combined test
result1 = s.loc[2:5]
result2 = s.loc[2:5]
result3 = s[2:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# list selection
result1 = s[[0.0, 5, 10]]
result2 = s.loc[[0.0, 5, 10]]
result3 = s.loc[[0.0, 5, 10]]
result4 = s.iloc[[0, 2, 4]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result1 = s[[1.6, 5, 10]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result2 = s.loc[[1.6, 5, 10]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result3 = s.loc[[1.6, 5, 10]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series(
[np.nan, 2, 4], index=[1.6, 5, 10]))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result1 = s[[0, 1, 2]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result2 = s.loc[[0, 1, 2]]
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result3 = s.loc[[0, 1, 2]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series(
[0.0, np.nan, np.nan], index=[0, 1, 2]))
result1 = s.loc[[2.5, 5]]
result2 = s.loc[[2.5, 5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, Series([1, 2], index=[2.5, 5.0]))
result1 = s[[2.5]]
result2 = s.loc[[2.5]]
result3 = s.loc[[2.5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series([1], index=[2.5]))
def test_floating_tuples(self):
# see gh-13509
s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.1, 0.2], name='foo')
result = s[0.0]
assert result == (1, 1)
expected = Series([(1, 1), (2, 2)], index=[0.0, 0.0], name='foo')
s = Series([(1, 1), (2, 2), (3, 3)], index=[0.0, 0.0, 0.2], name='foo')
result = s[0.0]
tm.assert_series_equal(result, expected)
def test_float64index_slicing_bug(self):
# GH 5557, related to slicing a float index
ser = {256: 2321.0,
1: 78.0,
2: 2716.0,
3: 0.0,
4: 369.0,
5: 0.0,
6: 269.0,
7: 0.0,
8: 0.0,
9: 0.0,
10: 3536.0,
11: 0.0,
12: 24.0,
13: 0.0,
14: 931.0,
15: 0.0,
16: 101.0,
17: 78.0,
18: 9643.0,
19: 0.0,
20: 0.0,
21: 0.0,
22: 63761.0,
23: 0.0,
24: 446.0,
25: 0.0,
26: 34773.0,
27: 0.0,
28: 729.0,
29: 78.0,
30: 0.0,
31: 0.0,
32: 3374.0,
33: 0.0,
34: 1391.0,
35: 0.0,
36: 361.0,
37: 0.0,
38: 61808.0,
39: 0.0,
40: 0.0,
41: 0.0,
42: 6677.0,
43: 0.0,
44: 802.0,
45: 0.0,
46: 2691.0,
47: 0.0,
48: 3582.0,
49: 0.0,
50: 734.0,
51: 0.0,
52: 627.0,
53: 70.0,
54: 2584.0,
55: 0.0,
56: 324.0,
57: 0.0,
58: 605.0,
59: 0.0,
60: 0.0,
61: 0.0,
62: 3989.0,
63: 10.0,
64: 42.0,
65: 0.0,
66: 904.0,
67: 0.0,
68: 88.0,
69: 70.0,
70: 8172.0,
71: 0.0,
72: 0.0,
73: 0.0,
74: 64902.0,
75: 0.0,
76: 347.0,
77: 0.0,
78: 36605.0,
79: 0.0,
80: 379.0,
81: 70.0,
82: 0.0,
83: 0.0,
84: 3001.0,
85: 0.0,
86: 1630.0,
87: 7.0,
88: 364.0,
89: 0.0,
90: 67404.0,
91: 9.0,
92: 0.0,
93: 0.0,
94: 7685.0,
95: 0.0,
96: 1017.0,
97: 0.0,
98: 2831.0,
99: 0.0,
100: 2963.0,
101: 0.0,
102: 854.0,
103: 0.0,
104: 0.0,
105: 0.0,
106: 0.0,
107: 0.0,
108: 0.0,
109: 0.0,
110: 0.0,
111: 0.0,
112: 0.0,
113: 0.0,
114: 0.0,
115: 0.0,
116: 0.0,
117: 0.0,
118: 0.0,
119: 0.0,
120: 0.0,
121: 0.0,
122: 0.0,
123: 0.0,
124: 0.0,
125: 0.0,
126: 67744.0,
127: 22.0,
128: 264.0,
129: 0.0,
260: 197.0,
268: 0.0,
265: 0.0,
269: 0.0,
261: 0.0,
266: 1198.0,
267: 0.0,
262: 2629.0,
258: 775.0,
257: 0.0,
263: 0.0,
259: 0.0,
264: 163.0,
250: 10326.0,
251: 0.0,
252: 1228.0,
253: 0.0,
254: 2769.0,
255: 0.0}
# smoke test for the repr
s = Series(ser)
result = s.value_counts()
str(result)
| bsd-3-clause |
lorenzo-desantis/mne-python | examples/realtime/rt_feedback_server.py | 11 | 4945 | """
==============================================
Real-time feedback for decoding :: Server Side
==============================================
This example demonstrates how to setup a real-time feedback
mechanism using StimServer and StimClient.
The idea here is to display future stimuli for the class which
is predicted less accurately. This allows on-demand adaptation
of the stimuli depending on the needs of the classifier.
To run this example, open ipython in two separate terminals.
In the first, run rt_feedback_server.py and then wait for the
message
RtServer: Start
Once that appears, run rt_feedback_client.py in the other terminal
and the feedback script should start.
All brain responses are simulated from a fiff file to make it easy
to test. However, it should be possible to adapt this script
for a real experiment.
"""
# Author: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
import mne
from mne.datasets import sample
from mne.realtime import StimServer
from mne.realtime import MockRtClient
from mne.decoding import EpochsVectorizer, FilterEstimator
print(__doc__)
# Load fiff file to simulate data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.Raw(raw_fname, preload=True)
# Instantiating stimulation server
# The with statement is necessary to ensure a clean exit
with StimServer('localhost', port=4218) as stim_server:
# The channels to be used while decoding
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
rt_client = MockRtClient(raw)
# Constructing the pipeline for classification
filt = FilterEstimator(raw.info, 1, 40)
scaler = preprocessing.StandardScaler()
vectorizer = EpochsVectorizer()
clf = SVC(C=1, kernel='linear')
concat_classifier = Pipeline([('filter', filt), ('vector', vectorizer),
('scaler', scaler), ('svm', clf)])
stim_server.start(verbose=True)
# Just some initially decided events to be simulated
    # Rest will be decided on the fly
ev_list = [4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4]
score_c1, score_c2, score_x = [], [], []
for ii in range(50):
# Tell the stim_client about the next stimuli
stim_server.add_trigger(ev_list[ii])
# Collecting data
if ii == 0:
X = rt_client.get_event_data(event_id=ev_list[ii], tmin=-0.2,
tmax=0.5, picks=picks,
stim_channel='STI 014')[None, ...]
y = ev_list[ii]
else:
X_temp = rt_client.get_event_data(event_id=ev_list[ii], tmin=-0.2,
tmax=0.5, picks=picks,
stim_channel='STI 014')
X_temp = X_temp[np.newaxis, ...]
X = np.concatenate((X, X_temp), axis=0)
time.sleep(1) # simulating the isi
y = np.append(y, ev_list[ii])
# Start decoding after collecting sufficient data
if ii >= 10:
# Now start doing rtfeedback
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2,
random_state=7)
y_pred = concat_classifier.fit(X_train, y_train).predict(X_test)
cm = confusion_matrix(y_test, y_pred)
score_c1.append(float(cm[0, 0]) / sum(cm, 1)[0] * 100)
score_c2.append(float(cm[1, 1]) / sum(cm, 1)[1] * 100)
# do something if one class is decoded better than the other
if score_c1[-1] < score_c2[-1]:
print("We decoded class RV better than class LV")
ev_list.append(3) # adding more LV to future simulated data
else:
print("We decoded class LV better than class RV")
ev_list.append(4) # adding more RV to future simulated data
# Clear the figure
plt.clf()
# The x-axis for the plot
score_x.append(ii)
# Now plot the accuracy
plt.plot(score_x[-5:], score_c1[-5:])
plt.hold(True)
plt.plot(score_x[-5:], score_c2[-5:])
plt.xlabel('Trials')
plt.ylabel('Classification score (% correct)')
plt.title('Real-time feedback')
plt.ylim([0, 100])
plt.xticks(score_x[-5:])
plt.legend(('LV', 'RV'), loc='upper left')
plt.show()
| bsd-3-clause |
edoddridge/aronnax | benchmarks/benchmark.py | 1 | 9842 | import pickle as pkl
import os.path as p
import sys
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from aronnax.utils import working_directory
import aronnax.driver as aro
self_path = p.dirname(p.abspath(__file__))
root_path = p.dirname(self_path)
n_time_steps = 502.0
scale_factor = 1000 / n_time_steps # Show times in ms
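# Illustrative arithmetic (added note, not used by the benchmarks below): the
# run times returned by aro.simulate are assumed here to be total wall-clock
# seconds for the full 502-step run, so e.g. 5.02 s * scale_factor ~= 10 ms per
# integration step, which is what the "Avg time per integration step (ms)"
# axes display.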
def benchmark_gaussian_bump_red_grav_save(grid_points):
run_time_O1 = np.zeros(len(grid_points))
run_time_Ofast = np.zeros(len(grid_points))
def bump(X, Y):
return 500. + 20*np.exp(-((6e5-X)**2 + (5e5-Y)**2)/(2*1e5**2))
with working_directory(p.join(self_path, "beta_plane_bump_red_grav")):
aro_exec = "aronnax_test"
for counter, nx in enumerate(grid_points):
run_time_O1[counter] = aro.simulate(
exe=aro_exec, initHfile=[bump], nx=nx, ny=nx)
aro_exec = "aronnax_core"
for counter, nx in enumerate(grid_points):
run_time_Ofast[counter] = aro.simulate(
exe=aro_exec, initHfile=[bump], nx=nx, ny=nx)
with open("times.pkl", "wb") as f:
pkl.dump((grid_points, run_time_O1, run_time_Ofast), f)
def benchmark_gaussian_bump_red_grav_plot():
with working_directory(p.join(self_path, "beta_plane_bump_red_grav")):
with open("times.pkl", "rb") as f:
(grid_points, run_time_O1, run_time_Ofast) = pkl.load(f)
plt.figure()
plt.loglog(grid_points, run_time_O1*scale_factor,
'-*', label='aronnax_test')
plt.loglog(grid_points, run_time_Ofast*scale_factor,
'-*', label='aronnax_core')
scale = scale_factor * run_time_O1[-7]/(grid_points[-7]**2)
plt.loglog(grid_points, scale*grid_points**2,
':', label='O(nx**2)', color='black', linewidth=0.5)
plt.legend()
plt.xlabel('Resolution (grid cells on one side)')
plt.ylabel('Avg time per integration step (ms)')
plt.title('Runtime scaling of a 1.5-layer Aronnax simulation on a square grid')
plt.savefig('beta_plane_bump_red_grav_scaling.png', dpi=150)
def benchmark_gaussian_bump_red_grav(grid_points):
benchmark_gaussian_bump_red_grav_save(grid_points)
benchmark_gaussian_bump_red_grav_plot()
def benchmark_gaussian_bump_save(grid_points):
run_time_O1 = np.zeros(len(grid_points))
run_time_Ofast = np.zeros(len(grid_points))
run_time_hypre_test = np.zeros(len(grid_points))
run_time_hypre = np.zeros(len(grid_points))
def bump(X, Y):
return 500. + 20*np.exp(-((6e5-X)**2 + (5e5-Y)**2)/(2*1e5**2))
with working_directory(p.join(self_path, "beta_plane_bump")):
aro_exec = "aronnax_test"
for counter, nx in enumerate(grid_points):
run_time_O1[counter] = aro.simulate(
exe=aro_exec, initHfile=[bump, lambda X, Y: 2000. - bump(X, Y)], nx=nx, ny=nx)
aro_exec = "aronnax_core"
for counter, nx in enumerate(grid_points):
run_time_Ofast[counter] = aro.simulate(
exe=aro_exec, initHfile=[bump, lambda X, Y: 2000. - bump(X, Y)], nx=nx, ny=nx)
aro_exec = "aronnax_external_solver_test"
for counter, nx in enumerate(grid_points[:9]):
run_time_hypre_test[counter] = aro.simulate(
exe=aro_exec, initHfile=[bump, lambda X, Y: 2000. - bump(X, Y)], nx=nx, ny=nx)
aro_exec = "aronnax_external_solver"
for counter, nx in enumerate(grid_points[:9]):
run_time_hypre[counter] = aro.simulate(
exe=aro_exec, initHfile=[bump, lambda X, Y: 2000. - bump(X, Y)], nx=nx, ny=nx)
with open("times.pkl", "wb") as f:
pkl.dump((grid_points, run_time_O1, run_time_Ofast,
run_time_hypre_test, run_time_hypre
), f)
def benchmark_gaussian_bump_plot():
with working_directory(p.join(self_path, "beta_plane_bump")):
with open("times.pkl", "rb") as f:
(grid_points, run_time_O1, run_time_Ofast,
run_time_hypre_test, run_time_hypre
) = pkl.load(f)
plt.figure()
plt.loglog(grid_points, run_time_O1*scale_factor,
'-*', label='aronnax_test')
plt.loglog(grid_points, run_time_Ofast*scale_factor,
'-*', label='aronnax_core')
plt.loglog(grid_points, run_time_hypre_test*scale_factor,
'-o', label='aronnax_external_solver_test')
plt.loglog(grid_points, run_time_hypre*scale_factor,
'-o', label='aronnax_external_solver')
scale = scale_factor * run_time_O1[3]/(grid_points[3]**3)
plt.loglog(grid_points, scale*grid_points**3,
':', label='O(nx**3)', color='black', linewidth=0.5)
scale = scale_factor * run_time_hypre[3]/(grid_points[3]**2)
plt.loglog(grid_points, scale*grid_points**2,
':', label='O(nx**2)', color='blue', linewidth=0.5)
plt.legend()
plt.xlabel('Resolution (grid cells on one side)')
plt.ylabel('Avg time per integration step (ms)')
plt.title('Runtime scaling of a 2-layer Aronnax simulation\nwith bathymetry on a square grid')
plt.savefig('beta_plane_bump_scaling.png', dpi=150)
def benchmark_gaussian_bump(grid_points):
benchmark_gaussian_bump_save(grid_points)
benchmark_gaussian_bump_plot()
def benchmark_parallel_gaussian_bump_red_grav_save(n_procs):
run_time = np.zeros(len(n_procs))
nx = 480
def bump(X, Y):
return 500. + 20*np.exp(-((6e5-X)**2 + (5e5-Y)**2)/(2*1e5**2))
with working_directory(p.join(self_path, "beta_plane_bump_red_grav")):
aro_exec = "aronnax_core"
for counter, nProcX in enumerate(n_procs):
if nProcX == 1:
run_time[counter] = aro.simulate(
exe=aro_exec, initHfile=[bump], nx=nx, ny=nx)
else:
run_time[counter] = aro.simulate(
exe=aro_exec, initHfile=[bump],
nx=nx, ny=nx, nProcX=nProcX)
with open("mpi_times.pkl", "wb") as f:
pkl.dump((n_procs, run_time), f)
def benchmark_parallel_gaussian_bump_red_grav_plot():
with working_directory(p.join(self_path, "beta_plane_bump_red_grav")):
with open("mpi_times.pkl", "rb") as f:
(n_procs, run_time) = pkl.load(f)
plt.figure()
plt.loglog(n_procs, run_time*scale_factor,
'-*', label='aronnax_core')
scale = scale_factor * run_time[0]
plt.loglog(n_procs, scale/n_procs,
':', label='O(1/n)', color='black', linewidth=0.5)
plt.legend()
plt.xlabel('Number of processors')
plt.ylabel('Avg time per integration step (ms)')
plt.title('Runtime scaling of a 1.5-layer Aronnax simulation\n on a square grid')
plt.savefig('beta_plane_bump_mpi_scaling.png', dpi=150, bbox_inches='tight')
def benchmark_parallel_gaussian_bump_red_grav(n_procs):
benchmark_parallel_gaussian_bump_red_grav_save(n_procs)
benchmark_parallel_gaussian_bump_red_grav_plot()
def benchmark_parallel_gaussian_bump_save(n_procs):
run_time = np.zeros(len(n_procs))
nx = 120
def bump(X, Y):
return 500. + 20*np.exp(-((6e5-X)**2 + (5e5-Y)**2)/(2*1e5**2))
with working_directory(p.join(self_path, "beta_plane_bump")):
aro_exec = "aronnax_external_solver"
for counter, nProcX in enumerate(n_procs):
if nProcX == 1:
run_time[counter] = aro.simulate(
exe=aro_exec, initHfile=[bump, lambda X, Y: 2000. - bump(X, Y)],
nx=nx, ny=nx)
else:
run_time[counter] = aro.simulate(
exe=aro_exec, initHfile=[bump, lambda X, Y: 2000. - bump(X, Y)],
nx=nx, ny=nx, nProcX=nProcX)
with open("mpi_times.pkl", "wb") as f:
pkl.dump((n_procs, run_time), f)
def benchmark_parallel_gaussian_bump_plot():
with working_directory(p.join(self_path, "beta_plane_bump")):
with open("mpi_times.pkl", "rb") as f:
(n_procs, run_time) = pkl.load(f)
plt.figure()
plt.loglog(n_procs, run_time*scale_factor,
'-*', label='aronnax_external_solver')
scale = scale_factor * run_time[0]
plt.loglog(n_procs, scale/n_procs,
':', label='O(1/n)', color='black', linewidth=0.5)
plt.legend()
plt.xlabel('Number of processors')
plt.ylabel('Avg time per integration step (ms)')
plt.title('Runtime scaling of a 2-layer Aronnax simulation\n on a square grid')
plt.savefig('beta_plane_bump_mpi_scaling.png', dpi=150, bbox_inches='tight')
def benchmark_parallel_gaussian_bump(n_procs):
benchmark_parallel_gaussian_bump_save(n_procs)
benchmark_parallel_gaussian_bump_plot()
if __name__ == '__main__':
if len(sys.argv) > 1:
if sys.argv[1] == "save":
benchmark_gaussian_bump_red_grav_save(np.array([10, 20, 40, 60, 80, 100, 150, 200, 300, 400, 500]))
benchmark_gaussian_bump_save(np.array([10, 20, 40, 60, 80, 100, 120]))
benchmark_parallel_gaussian_bump_red_grav_save([1,2,3,4])
benchmark_parallel_gaussian_bump_save([1,2,3,4])
else:
benchmark_gaussian_bump_red_grav_plot()
benchmark_gaussian_bump_plot()
benchmark_parallel_gaussian_bump_red_grav_plot()
benchmark_parallel_gaussian_bump_plot()
else:
benchmark_gaussian_bump_red_grav(np.array([10, 20, 40, 60, 80, 100, 150, 200, 300, 400, 500]))
benchmark_gaussian_bump(np.array([10, 20, 40, 60, 80, 100, 120]))
benchmark_parallel_gaussian_bump_red_grav([1,2,3,4])
benchmark_parallel_gaussian_bump([1,2,3,4])
| mit |
VictorLoren/hanging-fabric-calculator | pycatenary.py | 1 | 2951 | # catenary calculation, re-written in python - NO Elasticity!!!
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
def cat(a):
# defining catenary function
#catenary eq (math): a*sinh(L/(2*a)+atanh(d/S))+a*sinh(L/(2*a)-atanh(d/S))-S=0
    return a*math.sinh(L/(2*a)+math.atanh(d/S))+a*math.sinh(L/(2*a)-math.atanh(d/S))-S
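# Added sanity-check sketch (not used below; example numbers are hypothetical):
# with y = atanh(d/S), sinh(x+y) + sinh(x-y) = 2*sinh(x)*cosh(y) and
# cosh(atanh(d/S)) = S/sqrt(S**2 - d**2), so the root of cat() also satisfies
# 2*a*sinh(L/(2*a)) = sqrt(S**2 - d**2), the textbook relation between cable
# length, span and support offset. A standalone equivalent:
def cat_check(a, L=100.0, d=10.0, S=110.0):
    return 2*a*math.sinh(L/(2*a)) - math.sqrt(S**2 - d**2)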
L=float(input("Horizontal Distance between supports [m]: "))
d=float(input ("Vertical Distance between supports [m]: "))
S=float(input("Length of cable [m] - must be greater than distance between supports: "))
w=float(input("Unit weight of cable [kg/m]: "))
za=float(input("Elevation of higher support from reference plane [m]: "))
#checking if cable length is bigger than total distance between supports
distance=(L**2+d**2)**0.5
if S <= distance:
print ("Length of cable must be greater than TOTAL distance between supports!")
S=float(input("Length of cable [m]: "))
else:
pass
# solving catenary function for 'a'
a=fsolve(cat, 1)
# hor. distance between lowest catenary point (P) to higher support point (La)
La=a*(L/(2*a)+math.atanh(d/S))
# hor. distance between lowest catenary point (P) to lower support point (Lb)
Lb=L-La
# vert. distance from higher support point to lowest point (P) in catenary (ha)
ha=a*math.cosh(La/a)-a
## calculating reaction forces and angles
# catenary length between support "A" (higher) and "P" - Sa
Sa=a*math.sinh(La/a)
# catenary length between support "B" (lower) and "P" - Sb
Sb=a*math.sinh(Lb/a)
# horizontal tension - constant through catenary: H
H=w*a
# vertical tension at "A" (Va) and "B" (Vb)
Va=Sa*w
Vb=Sb*w
# tension at "A" (TA) and B (TB)
TA=(H**2+Va**2)**0.5
TB=(H**2+Vb**2)**0.5
# inclination angles from vertical at "A" (ThetA) and B (ThetB)
ThetA=math.atan(H/Va)
ThetB=math.atan(H/Vb)
ThetAd=ThetA*180/math.pi;
ThetBd=ThetB*180/math.pi;
# establishing A, B and P in coordinate system
# index "a" corresponding to point "A", "b" to "B"-point and "P" to lowest caten. point
zb=za-d
zp=za-ha
xa=La
xp=0
xb=-Lb
# printing results to the screen
print "Horizontal Distance between supports in meters: ", round(L,3)
print "Catenary length in meters: ", round(S,3)
print "Vertical Distance Between supports in meters: ", round(d,3)
print "Unit Weight of Catenary line in kg/m: ", round(w,3)
print "Elevation of higher support (A) from reference plane in meters: ", round(za,3)
print "\nCatenary coef.: ", round(a,5)
print "Horizontal tension in kg (constant along line: ", round(H,3)
print "Vertical tension in A in kg: ", round(Va,3)
print "Total tension in A in kg: ", round(TA,3)
print "Total tension in B in kg: ", round(TB,3)
print "Inclination angle from vertical at A in radians: ", round(ThetA,3)
print "Inclination angle from vertical at B in radians: ", round(ThetB,3)
print "Inclination angle from vertical at A in degrees: ", round(ThetAd,3)
print "Inclination angle from vertical at B in degrees: ", round(ThetBd,3)
| mit |
samuel1208/scikit-learn | sklearn/utils/testing.py | 47 | 23587 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the bacward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Calable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
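# Minimal usage sketch (added illustration, not part of the original module;
# `_noisy` is a made-up helper). assert_warns passes through the return value
# of the wrapped callable, so it can be checked afterwards.
def _example_assert_warns():
    def _noisy():
        warnings.warn("this is noisy", UserWarning)
        return 42
    assert assert_warns(UserWarning, _noisy) == 42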
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
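# Companion sketch (added illustration; `_noisy` is again a made-up helper):
# assert_warns_message additionally matches the warning text, either as a
# substring or through a callable predicate.
def _example_assert_warns_message():
    def _noisy():
        warnings.warn("feature foo is deprecated", UserWarning)
    assert_warns_message(UserWarning, "deprecated", _noisy)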
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
        The expected exception or tuple of exceptions to be raised.
    func : callable
        Callable object to raise error
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
    'data'; keep that in mind in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
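# Illustrative sketch (added, not part of the original module): list the names
# of all classifiers discovered by the crawler. The exact contents depend on
# the scikit-learn version, so nothing specific is asserted here.
def _example_all_estimators():
    return [name for name, cls in all_estimators(type_filter='classifier')]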
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
jmschrei/scikit-learn | sklearn/linear_model/logistic.py | 4 | 65682 |
"""
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
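# Worked illustration (added sketch with made-up numbers): when w carries an
# appended intercept, _intercept_dot splits it into (w, c) and returns
# y * (X.dot(w) + c).
def _example_intercept_dot():
    X = np.array([[1., 2.], [3., 4.]])
    y = np.array([1., -1.])
    w = np.array([0.5, -0.25, 2.0])  # last entry is the intercept c
    w_out, c, yz = _intercept_dot(w, X, y)
    # X.dot(w_out) + c == [2.0, 2.5], so yz == [2.0, -2.5]
    return w_out, c, yz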
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
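# Optional sanity check (added sketch, not used by any solver): compare the
# analytic gradient with a central finite difference of _logistic_loss.
# The data, alpha and the step size eps are arbitrary choices.
def _example_logistic_grad_check(eps=1e-6):
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.where(rng.randn(20) > 0, 1., -1.)
    w = rng.randn(3)
    _, grad = _logistic_loss_and_grad(w, X, y, alpha=1.0)
    approx = np.empty_like(w)
    for i in range(w.size):
        e = np.zeros_like(w)
        e[i] = eps
        approx[i] = (_logistic_loss(w + e, X, y, 1.0) -
                     _logistic_loss(w - e, X, y, 1.0)) / (2 * eps)
    return np.max(np.abs(grad - approx))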
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
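# Optional sanity check (added sketch): the Hessian-vector product closure Hs
# can be compared against a forward finite difference of the gradient,
# (grad(w + eps*s) - grad(w)) / eps. Sizes and eps are arbitrary.
def _example_logistic_hess_check(eps=1e-6):
    rng = np.random.RandomState(0)
    X = rng.randn(15, 4)
    y = np.where(rng.randn(15) > 0, 1., -1.)
    w = rng.randn(4)
    s = rng.randn(4)
    grad, Hs = _logistic_grad_hess(w, X, y, alpha=1.0)
    grad_eps, _ = _logistic_grad_hess(w + eps * s, X, y, alpha=1.0)
    return np.max(np.abs(Hs(s) - (grad_eps - grad) / eps))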
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
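# Quick property check (added sketch with arbitrary shapes): the returned p is
# a softmax over classes computed through logsumexp, so every row sums to one.
def _example_multinomial_loss_probs():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 3)
    Y = np.eye(4)[rng.randint(4, size=10)]
    w = rng.randn(4 * 3)
    _, p, _ = _multinomial_loss(w, X, Y, 1.0, np.ones(10))
    return np.allclose(p.sum(axis=1), 1.0)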
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
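# Optional sanity check (added sketch): validate the R-operator Hessian-vector
# product against a finite difference of _multinomial_loss_grad. Shapes and
# eps are arbitrary; Y is one-hot and sample_weight is required by these
# private helpers.
def _example_multinomial_hess_check(eps=1e-6):
    rng = np.random.RandomState(0)
    n_samples, n_features, n_classes = 12, 3, 4
    X = rng.randn(n_samples, n_features)
    Y = np.eye(n_classes)[rng.randint(n_classes, size=n_samples)]
    sw = np.ones(n_samples)
    w = rng.randn(n_classes * n_features)
    v = rng.randn(n_classes * n_features)
    _, grad, _ = _multinomial_loss_grad(w, X, Y, 1.0, sw)
    _, hessp = _multinomial_grad_hess(w, X, Y, 1.0, sw)
    _, grad_eps, _ = _multinomial_loss_grad(w + eps * v, X, Y, 1.0, sw)
    return np.max(np.abs(hessp(v) - (grad_eps - grad) / eps))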
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver in ['liblinear', 'sag']:
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=False,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
copy : bool, default False
Whether or not to produce a copy of the data. A copy is not required
anymore. This parameter is deprecated and will be removed in 0.19.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
    sample_weight : array-like, shape (n_samples,), optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if copy:
warnings.warn("A copy is not required anymore. The 'copy' parameter "
"is deprecated and will be removed in 0.19.",
DeprecationWarning)
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
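        # e.g. Cs=10 expands to ten values spaced logarithmically from 1e-4 to 1e4.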
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
if check_input or copy:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set "
"class_weight='balanced'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
    # For doing an ovr, we need to mask the labels first. For the
    # multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
# for compute_class_weight
# 'auto' is deprecated and will be removed in 0.19
if class_weight in ("auto", "balanced"):
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
lbin = LabelBinarizer()
Y_binarized = lbin.fit_transform(y)
if Y_binarized.shape[1] == 1:
Y_binarized = np.hstack([1 - Y_binarized, Y_binarized])
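            # For a binary problem LabelBinarizer yields a single column; stacking
            # its complement gives one column per class, as the multinomial code
            # below expects.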
w0 = np.zeros((Y_binarized.shape[1], n_features + int(fit_intercept)),
order='F')
if coef is not None:
        # The initialization must work whether or not coef includes the bias
        # (intercept) term.
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
w0 = w0.ravel()
target = Y_binarized
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
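    # For the lbfgs and newton-cg solvers, func/grad/hess now hold the callables
    # they need: lbfgs takes a combined loss-and-gradient function, while
    # newton-cg takes separate loss, gradient and Hessian-vector-product callables.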
coefs = list()
warm_start_sag = {'coef': w0}
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
            except KeyError:
                # older scipy versions do not report 'nit'
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, 'log', 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum,
warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
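            # In the binary case only the row for the positive class is kept.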
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
    sample_weight : array-like, shape (n_samples,), optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
# To deal with object dtypes, we need to convert into an array of floats.
y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
    cross-entropy loss if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
max_iter : int
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg' and 'lbfgs' handle
multinomial loss; 'sag' and 'liblinear' are limited to
one-versus-rest schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs'
        and 'newton-cg' solvers.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
else None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver, copy=False,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
Else use a one-vs-rest approach, i.e calculate the probability
of each class assuming it to be positive using the logistic function.
and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizers. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path, i.e. guess the initial coefficients of the
    present fit to be the coefficients obtained after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg' and 'lbfgs' handle
multinomial loss; 'sag' and 'liblinear' are limited to
one-versus-rest schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for 'lbfgs' and
'newton-cg' solvers.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
        `coef_` is a read-only property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when the parameter fit_intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
else None
check_classification_targets(y)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in
['balanced', 'auto']):
# 'auto' is deprecated and will be removed in 0.19
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
# compute the class weights for the entire dataset y
if self.class_weight in ("auto", "balanced"):
classes = np.unique(y)
class_weight = compute_class_weight(self.class_weight, classes, y)
class_weight = dict(zip(classes, class_weight))
else:
class_weight = self.class_weight
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty, copy=False,
class_weight=class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
| bsd-3-clause |
hcmlab/nova | bin/PythonScripts/ImageExplainerInnvestigate.py | 2 | 7523 | import sys
class CatchOutErr:
def __init__(self):
self.value = ''
def write(self, txt):
self.value += txt
catchOutErr = CatchOutErr()
sys.stderr = catchOutErr
from tensorflow.keras.preprocessing import image as kerasimg
from tensorflow.keras.models import load_model
import os
import numpy as np
import io as inputoutput
from PIL import Image as pilimage
import innvestigate
import innvestigate.utils as iutils
import innvestigate.utils.visualizations as ivis
import matplotlib.pyplot as plot
import ast
import multiprocessing
def transform_img_fn(img):
img = pilimage.open(inputoutput.BytesIO(bytes(img)))
imgR = img.resize((224, 224))
x = kerasimg.img_to_array(imgR)
x = np.expand_dims(x, axis=0)
return (x, img)
def loadModel(modelPath):
try:
model = load_model(modelPath)
except Exception as ex:
return None
return model
def getTopXpredictions(prediction, topLabels):
prediction_class = []
for i in range(0, len(prediction[0])):
prediction_class.append((i, prediction[0][i]))
prediction_class.sort(key=lambda x: x[1], reverse=True)
return prediction_class[:topLabels]
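# e.g. getTopXpredictions(model.predict(x), 3) returns the three
# (class index, probability) pairs with the highest predicted probability.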
def explain(model, img, postprocess, explainer, args):
explainDict = {}
if len(args) > 0:
explainDict = ast.literal_eval(args)
explanation = []
img, oldImg = transform_img_fn(img)
img = img*(1./255)
prediction = model.predict(img)
topClass = getTopXpredictions(prediction, 1)
model_wo_sm = iutils.keras.graph.model_wo_softmax(model)
analyzer = []
if explainer == "GUIDEDBACKPROP":
analyzer = innvestigate.analyzer.GuidedBackprop(model_wo_sm)
elif explainer == "GRADIENT":
analyzer = innvestigate.analyzer.Gradient(model_wo_sm)
elif explainer == "DECONVNET":
analyzer = innvestigate.analyzer.Deconvnet(model_wo_sm)
elif explainer == "LRPEPSILON":
analyzer = innvestigate.analyzer.LRPEpsilon(model_wo_sm)
elif explainer == "LRPZ":
analyzer = innvestigate.analyzer.LRPZ(model_wo_sm)
elif explainer == "LRPALPHABETA":
analyzer = innvestigate.analyzer.LRPAlphaBeta(model_wo_sm, alpha=explainDict['lrpalpha'], beta=explainDict['lrpbeta'])
elif explainer == "DEEPTAYLOR":
analyzer = innvestigate.analyzer.DeepTaylor(model_wo_sm)
# Applying the analyzer
analysis = analyzer.analyze(img)
imgFinal = []
if postprocess == "GRAYMAP":
imgFinal = graymap(analysis)[0]
elif postprocess =="HEATMAP":
imgFinal = heatmap(analysis)[0]
elif postprocess == "BK_PROJ":
imgFinal = bk_proj(analysis)[0]
elif postprocess == "GNUPLOT2":
imgFinal = heatmapgnuplot2(analysis)[0]
elif postprocess == "CMRMAP":
imgFinal = heatmapCMRmap(analysis)[0]
elif postprocess == "NIPY_SPECTRAL":
imgFinal = heatmapnipy_spectral(analysis)[0]
elif postprocess == "RAINBOW":
imgFinal = heatmap_rainbow(analysis)[0]
elif postprocess == "INFERNO":
imgFinal = heatmap_inferno(analysis)[0]
elif postprocess == "GIST_HEAT":
imgFinal = heatmap_gist_heat(analysis)[0]
elif postprocess == "VIRIDIS":
imgFinal = heatmap_viridis(analysis)[0]
imgFinal = np.uint8(imgFinal*255)
img = pilimage.fromarray(imgFinal)
imgByteArr = inputoutput.BytesIO()
img.save(imgByteArr, format='JPEG')
imgByteArr = imgByteArr.getvalue()
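    # The explanation is returned as (predicted class index, its probability,
    # JPEG-encoded bytes of the post-processed relevance map).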
explanation = (topClass[0][0], topClass[0][1], imgByteArr)
return explanation
def lrpalphatest(asdf):
print(asdf)
modelPath = "F:/test/pokemon.trainer.PythonModel.model.keras_vgg_face.h5"
img_path = "F:/test/pikachu.jpeg"
img = pilimage.open(img_path)
imgByteArr = inputoutput.BytesIO()
img.save(imgByteArr, format='JPEG')
imgByteArr = imgByteArr.getvalue()
img, oldImg = transform_img_fn(imgByteArr)
img = img*(1./255)
model = load_model(modelPath)
model_wo_sm = iutils.keras.graph.model_wo_softmax(model)
# Creating an analyzer
gradient_analyzer = innvestigate.analyzer.LRPAlphaBeta(model_wo_sm, alpha=1, beta=0)
analysis = gradient_analyzer.analyze(img)
testfilter = heatmap_rainbow(analysis)[0]
plot.imshow(testfilter)
plot.show()
imgFinal = graymap(analysis)[0]
imgFinal = np.uint8(imgFinal*255)
img = pilimage.fromarray(imgFinal)
imgByteArr = inputoutput.BytesIO()
img.save(imgByteArr, format='JPEG')
imgByteArr = imgByteArr.getvalue()
plot.imshow(graymap(analysis)[0])
plot.show()
def test():
modelPath = "C:/Users/Alex Heimerl/Desktop/test/pokemon.trainer.PythonModel.model.keras_vgg_face.h5"
img_path = "C:/Users/Alex Heimerl/Desktop/test/pikachu.jpeg"
img = pilimage.open(img_path)
imgByteArr = inputoutput.BytesIO()
img.save(imgByteArr, format='JPEG')
imgByteArr = imgByteArr.getvalue()
img, oldImg = transform_img_fn(imgByteArr)
img = img*(1./255)
model = load_model(modelPath)
model_wo_sm = iutils.keras.graph.model_wo_softmax(model)
# Creating an analyzer
gradient_analyzer = innvestigate.analyzer.GuidedBackprop(model_wo_sm)
# Applying the analyzer
# analysis = gradient_analyzer.analyze(img)
testanalyzer = []
testanalyzer = innvestigate.analyzer.DeepTaylor(model_wo_sm)
analysis = testanalyzer.analyze(img)
testfilter = heatmap_rainbow(analysis)[0]
plot.imshow(testfilter)
plot.show()
imgFinal = graymap(analysis)[0]
imgFinal = np.uint8(imgFinal*255)
img = pilimage.fromarray(imgFinal)
imgByteArr = inputoutput.BytesIO()
img.save(imgByteArr, format='JPEG')
imgByteArr = imgByteArr.getvalue()
plot.imshow(graymap(analysis)[0])
plot.show()
def preprocess(X, net):
X = X.copy()
X = net["preprocess_f"](X)
return X
def postprocess(X, color_conversion, channels_first):
X = X.copy()
X = iutils.postprocess_images(
X, color_coding=color_conversion, channels_first=channels_first)
return X
def image(X):
X = X.copy()
return ivis.project(X, absmax=255.0, input_is_postive_only=True)
def bk_proj(X):
X = ivis.clip_quantile(X, 1)
return ivis.project(X)
def heatmap(X):
X = ivis.gamma(X, minamp=0, gamma=0.95)
return ivis.heatmap(X)
def heatmapgnuplot2(X):
X = np.abs(X)
return ivis.heatmap(X, cmap_type="gnuplot2", input_is_postive_only=True)
def heatmapCMRmap(X):
X = np.abs(X)
return ivis.heatmap(X, cmap_type="CMRmap", input_is_postive_only=True)
def heatmapnipy_spectral(X):
X = np.abs(X)
return ivis.heatmap(X, cmap_type="nipy_spectral", input_is_postive_only=True)
def heatmap_rainbow(X):
X = np.abs(X)
return ivis.heatmap(X, cmap_type="rainbow", input_is_postive_only=True)
def heatmap_inferno(X):
X = np.abs(X)
return ivis.heatmap(X, cmap_type="inferno", input_is_postive_only=True)
def heatmap_viridis(X):
X = np.abs(X)
return ivis.heatmap(X, cmap_type="viridis", input_is_postive_only=True)
def heatmap_gist_heat(X):
X = np.abs(X)
return ivis.heatmap(X, cmap_type="gist_heat", input_is_postive_only=True)
def graymap(X):
return ivis.graymap(np.abs(X), input_is_postive_only=True)
def test_explain(asdf):
result = []
p = multiprocessing.Process(target=lrpalphatest, args=(asdf,))
result.append(p)
p.start()
p.join()
print(result)
return (None, None)
if __name__ == '__main__':
# lrpalphatest()
test_explain("test")
| gpl-3.0 |
lanfker/tdma_imac | src/flow-monitor/examples/wifi-olsr-flowmon.py | 27 | 7354 | # -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <[email protected]>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
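# With the defaults above the scenario is a 3x3 grid of ad hoc nodes spaced
# 100 m apart (i.e. 9 nodes in total).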
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.RandomVariableValue(ns.core.ConstantVariable(1)))
onOffHelper.SetAttribute("OffTime", ns.core.RandomVariableValue(ns.core.ConstantVariable(0)))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
app.Start(ns.core.Seconds(ns.core.UniformVariable(20, 30).GetValue()))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-2.0 |
KaceyLeavitt/Tetrahedra_Integration | Tetrahedra_Integration/greens_function.py | 1 | 12983 | import scipy
import scipy.integrate as integrate
import mpmath
import numpy as np
import math
import cmath
import matplotlib.pyplot as plt
from scipy.misc import derivative
"""Solves the integral of the Lattice Green's Function for FCC, BCC,
and simple cubic lattices. Based on the work in the paper "Lattice
Green's Functions for the Cubic Lattices in Terms of the Complete
Elliptic Integral" by Tohru Morita and Tsuyoshi and Tsuyoshi
Horiguchi found in Journal of Mathematical Physics 12, 981 (1971);
doi: 10.1063/1.1665692"""
def k_fcc(t, x):
"""Calculates the value of the complex modulus k in the
paper."""
k = 2 * complex(t + math.cos(x) ** 2) ** .5 / (t + 1)
return k
def k_bcc(t, x):
"""Calculates the value of the complex modulus k in the
paper."""
k = math.cos(x) / t
return k
def k_simple(t, gamma, x):
"""Calculates the value of the complex modulus k in the
paper."""
k = 2 / (t - gamma * math.cos(x))
return k
def generate_path(a, b):
"""Generates the most direct path in the complex plane between
points a and b."""
real_path = lambda x: np.real(a) + x * (np.real(b) - np.real(a))
imag_path = lambda x: np.imag(a) + x * (np.imag(b) - np.imag(a))
return real_path, imag_path
def complex_quadrature(func, a, b, **kwargs):
"""Performs Gaussian quadrature for the input function func
along the most direct path in the complex plane between points
a and b."""
real_path, imag_path = generate_path(a, b)
def real_func(x):
"""Returns the function to be integrated to produce the
real portion of the complex integral."""
return scipy.real(func(x)) * \
derivative(real_path, x, dx=1e-6) - \
scipy.imag(func(x)) * derivative(imag_path, x,
dx=1e-6)
def imag_func(x):
"""Returns the function to be integrated to produce the
imaginary portion of the complex integral."""
return scipy.imag(func(x)) * \
derivative(real_path, x, dx=1e-6) + \
scipy.real(func(x)) * derivative(imag_path, x,
dx=1e-6)
real_integral = integrate.quad(real_func, 0, 1, **kwargs)
imag_integral = integrate.quad(imag_func, 0, 1, **kwargs)
return (real_integral[0] + 1j*imag_integral[0])
def density_of_states_fcc(s):
density_of_states = 0
if s <= -1 or s > 3:
raise ValueError("Error: s must be a value between -1 and 3")
elif s < 0:
density_of_states = 4. / (math.pi ** 2. * (s + 1.)) * \
(integrate.quad(lambda x:
np.real(1. / k_fcc(s, x) * \
complex(mpmath.ellipk((
k_fcc(s, x) ** 2 - 1.) ** .5 / \
k_fcc(s, x)))), 0.,
math.acos((1. - s) / 2.))[0] + 2 * \
integrate.quad(lambda x:
np.real(complex(mpmath.ellipk((1 - \
k_fcc(s, x) ** 2) ** .5))),
math.acos((1. - s) / 2.),
math.acos((-1 * s) ** .5))[0] + 2 * \
integrate.quad(lambda x:
np.real(1. / (1. - k_fcc(s, x) ** \
2) ** .5 * complex(mpmath.ellipk(1. / \
(1 - k_fcc(s, x) ** 2) ** .5))),
math.acos((-1 * s) ** .5), math.pi / \
2)[0])
elif s < 1:
        density_of_states = 4. / (math.pi ** 2. * (s + 1.)) * \
            (integrate.quad(lambda x:
                            np.real(1. / k_fcc(s, x) *
                                    complex(mpmath.ellipk(
                                        (k_fcc(s, x) ** 2 - 1.) ** .5 /
                                        k_fcc(s, x)))),
                            0., math.acos((1. - s) / 2.))[0] +
             2 * integrate.quad(lambda x:
                                np.real(complex(mpmath.ellipk(
                                    (1 - k_fcc(s, x) ** 2) ** .5))),
                                math.acos((1. - s) / 2.), math.pi / 2.)[0])
    else:
        density_of_states = 4. / (math.pi ** 2. * (s + 1.)) * \
            integrate.quad(lambda x:
                           np.real(1. / k_fcc(s, x) *
                                   complex(mpmath.ellipk(
                                       (k_fcc(s, x) ** 2 - 1) ** .5 /
                                       k_fcc(s, x)))),
                           0, math.acos((s - 1.) / 2.))[0]
return density_of_states
# def number_of_states_fcc(s):
# number_of_states = 0
# if s <= -1 or s > 3:
# raise ValueError("Error: s must be a value between -1 and 3")
# else:
# #number_of_states = integrate.quad(lambda x: density_of_states_fcc(x), -.7, 0) + \
# number_of_states = integrate.quad(lambda x: density_of_states_fcc(x), .05, s)
# return number_of_states
def plot_density_of_states_fcc():
plotting_range = np.linspace(-0.95, 2.9, 100, endpoint=True)
for s in plotting_range:
plt.scatter(s, density_of_states_fcc(s), c='b', marker='.')
plt.show()
# def plot_number_of_states_fcc():
# plotting_range = np.linspace(.1, 2.9, 15, endpoint=True)
# for s in plotting_range:
# plt.scatter(s, number_of_states_fcc(s)[0])
# plt.show()
def density_of_states_bcc(s):
density_of_states = 0
if s <= 0 or s > 1:
raise ValueError("s must be between 0 and 1.")
else:
density_of_states = 4 / (math.pi ** 2 * s) * integrate.quad(lambda x: 1 / k_bcc(s, x) * mpmath.ellipk((k_bcc(s, x) ** 2 - 1) ** 0.5 / k_bcc(s, x)), 0, math.acos(s))[0]
return density_of_states
def plot_density_of_states_bcc():
plotting_range = np.linspace(0.1, 1, 100, endpoint=True)
for s in plotting_range:
plt.scatter(s, density_of_states_bcc(s), c='b', marker='.')
plt.show()
def density_of_states_simple(s):
density_of_states = 0
gamma = 1
if s > 1 and s < 3:
density_of_states = (1 / math.pi) ** 2 * integrate.quad(lambda x: mpmath.ellipk((k_simple(s, gamma, x) ** 2 - 1) ** 0.5 / k_simple(s, gamma, x)), 0, math.acos((s - 2) / gamma))[0]
elif s > 0 and s < 1:
density_of_states = (1 / math.pi) ** 2 * integrate.quad(lambda x: mpmath.ellipk(((k_simple(s, gamma, x) ** 2 - 1) ** 0.5) / k_simple(s, gamma, x)), 0, math.pi, limit=10000, points=[math.acos(s / gamma)])[0]
return density_of_states
def plot_density_of_states_simple():
plotting_range = np.linspace(0.1, 2.9, 100, endpoint=True)
for s in plotting_range:
plt.scatter(s, density_of_states_simple(s), c='b', marker='.')
plt.show()
epsilon = .000001j
gamma = 1
N = 11
# t_points = np.linspace(0, 10, N, endpoint=True)
s_3_to_10 = np.linspace(3, 10, N, endpoint=True)
s_1_to_3 = np.linspace(1, 3, N, endpoint=False)
s_0_to_1 = np.linspace(0, 1, N, endpoint=True)
s_3_to_6 = np.linspace(3, 6, N, endpoint=True)
s_neg_1_to_0 = np.linspace(-.99, 0, N, endpoint=True)
# for s in s_3_to_10:
# #print(complex(mpmath.ellipk(cmath.sqrt(k(t, gamma, 1) ** 2 - 1) / k(t, gamma, 1))))
# #print((1 / math.pi) ** 2 * complex_quadrature(lambda x: complex(mpmath.ellipk(cmath.sqrt(k(t, gamma, x) ** 2 - 1) / k(t, gamma, x))), np.absolute(cmath.acos((s + 2)/ gamma)), np.absolute(cmath.acos((s - 2)/ gamma))))
# #print(cmath.acos((s - 2)/ gamma))
# #G_i = (1 / math.pi) ** 2 * complex_quadrature(lambda x: complex(mpmath.ellipk(cmath.sqrt(k(t, gamma, x) ** 2 - 1) / k(t, gamma, x))), cmath.acos((s + 2)/ gamma), cmath.acos((s - 2)/ gamma))
# # G_r = (1 / math.pi) ** 2 * (-1 * complex_quadrature(lambda x: abs(k(t, gamma, x)) * complex(mpmath.ellipk(abs(k(t, gamma, x)))), 0, cmath.acos((s + 2)/ gamma)) - complex_quadrature(lambda x: complex(mpmath.ellipk(1 / abs(k(t, gamma, x)))), cmath.acos((s + 2)/ gamma), cmath.acos(s / gamma)) + complex_quadrature(lambda x: complex(mpmath.ellipk(1 / k(t, gamma, x))), cmath.acos(s / gamma), cmath.acos((s - 2)/ gamma)) + complex_quadrature(lambda x: k(t, gamma, x) * complex(mpmath.ellipk(k(t, gamma, x))), cmath.acos((s - 2)/ gamma), math.pi))
# #print(integrate.quad(lambda x: (k(s, gamma, x) * mpmath.ellipk(k(s, gamma, x))), 0, math.pi))
# G_r = (1 / math.pi) ** 2 * integrate.quad(lambda x: (k(s, gamma, x) * mpmath.ellipk(k(s, gamma, x))), 0, math.pi)[0]
# G_i = 0
#
# #print(s, np.real(G_r))
# plt.scatter(s, np.real(G_r), color="blue")
# plt.scatter(s, np.real(G_i), color="red")
# for s in s_1_to_3:
# G_r = (1 / math.pi) ** 2 * integrate.quad(lambda x: mpmath.ellipk(1 / k(s, gamma, x)), 0, math.acos((s - 2) / gamma))[0] + (1 / math.pi) ** 2 * integrate.quad(lambda x: (k(s, gamma, x) * mpmath.ellipk(k(s, gamma, x))), math.acos((s - 2) / gamma), math.pi)[0]
# G_i = (1 / math.pi) ** 2 * integrate.quad(lambda x: mpmath.ellipk((k(s, gamma, x) ** 2 - 1) ** 0.5 / k(s, gamma, x)), 0, math.acos((s - 2) / gamma))[0]
#
# plt.scatter(s, np.real(G_r), color="blue")
# plt.scatter(s, np.real(G_i), color="red")
# for s in s_0_to_1:
# # G_r = -((1 / math.pi) ** 2) * integrate.quad(lambda x: mpmath.ellipk(1 / abs(k(s, gamma, x))), 0, math.acos(s / gamma))[0] + (1 / math.pi) ** 2 * integrate.quad(lambda x: mpmath.ellipk(1 / k(s, gamma, x)), math.acos(s / gamma), math.pi)[0]
# G_i = (1 / math.pi) ** 2 * integrate.quad(lambda x: mpmath.ellipk(((k(s, gamma, x) ** 2 - 1) ** 0.5) / k(s, gamma, x)), 0, math.pi, limit=10000, points=[math.acos(s / gamma)])[0]
# # plt.scatter(s, np.real(G_r), color="blue")
# plt.scatter(s, np.real(G_i), color="red")
# plt.axis([0, 7, 0, 2])
# plt.show()
# for s in s_0_to_1:
# t = s - epsilon
# G_r = 4 / (math.pi ** 2 * s) * integrate.quad(lambda x: 1 / k(s, gamma, x) * mpmath.ellipk(1 / k(s, gamma, x)), 0, math.acos(s))[0] + 4 / (math.pi ** 2 * s) * integrate.quad(lambda x: mpmath.ellipk(k(s, gamma, x)), math.acos(s), math.pi / 2)[0]
# G_i = 4 / (math.pi ** 2 * s) * integrate.quad(lambda x: 1 / k(s, gamma, x) * mpmath.ellipk((k(s, gamma, x) ** 2 - 1) ** 0.5 / k(s, gamma, x)), 0, math.acos(s))[0]
#
# plt.scatter(s, np.real(G_r), color="blue")
# plt.scatter(s, np.real(G_i), color="red")
#
# for s in s_1_to_3:
# #print(4 / (math.pi ** 2 * s) * integrate.quad(lambda x: mpmath.ellipk(k(s, gamma, x)), 0, math.pi / 2)[0])
# G_r = 4 / (math.pi ** 2 * s) * integrate.quad(lambda x: mpmath.ellipk(k(s, gamma, x)), 0, math.pi / 2)[0]
# G_i = 0
#
# plt.scatter(s, np.real(G_r), color="blue")
# plt.scatter(s, np.real(G_i), color="red")
#
# plt.axis([0, 2, 0, 2.8])
# plt.show()
# for s in s_3_to_6:
# G_r = 4 / (math.pi ** 2 * (s + 1)) * integrate.quad(lambda x: mpmath.ellipk(k(s, gamma, x)), 0, math.pi / 2)[0]
# G_i = 0
#
# plt.scatter(s, np.real(G_r), color="blue")
# plt.scatter(s, np.real(G_i), color="red")
# for s in s_1_to_3:
# #G_r = 4 / (math.pi ** 2 * (s + 1)) * complex_quadrature(lambda x: 1 / k(s, gamma, x) * mpmath.ellipk(k(s, gamma, x)), 0, cmath.acos((s - 1) / 2))[0] #+ 4 / (math.pi ** 2 * (s + 1)) * integrate.quad(lambda x: mpmath.ellipk(float(k(s, gamma, x))), math.acos((s - 1) / 2), math.pi / 2)[0]
# G_i = 4. / (math.pi ** 2. * (s + 1.)) * integrate.quad(lambda x: np.real(1. / k(s, gamma, x) * complex(mpmath.ellipk((k(s, gamma, x) ** 2 - 1) ** .5 / k(s, gamma, x)))), 0, math.acos((s - 1.) / 2.))[0]
# #print(k(s, gamma, 0))
# #plt.scatter(s, np.real(G_r), color="blue")
# plt.scatter(s, np.real(G_i), color="red")
# for s in s_0_to_1:
# #G_r = 4. / (math.pi ** 2. * (s + 1.)) * (integrate.quad(lambda x: np.real(1. / k(s, gamma, x) * complex(mpmath.ellipk(k(s, gamma, x)))), 0., math.acos((s - 1.) / 2.))[0] + integrate.quad(lambda x: np.real(complex(mpmath.ellipk(k(s, gamma, x)))), math.acos((1. - s) / 2.), math.pi / 2.)[0])
# G_i = 4. / (math.pi ** 2. * (s + 1.)) * (integrate.quad(lambda x: np.real(1. / k(s, gamma, x) * complex(mpmath.ellipk((k(s, gamma, x) ** 2 - 1.) ** .5 / k(s, gamma, x)))), 0., math.acos((1. - s) / 2.))[0] + 2 * integrate.quad(lambda x: np.real(complex(mpmath.ellipk((1 - k(s, gamma, x) ** 2) ** .5))), math.acos((1. - s) / 2.), math.pi / 2.)[0])
# #print(1 / k(s, gamma, 0.1) * mpmath.ellipk(k(s, gamma, 0.1)))
# #plt.scatter(s, np.real(G_r), color="blue")
# plt.scatter(s, np.real(G_i), color="red")
# for s in s_neg_1_to_0:
# G_i = 4. / (math.pi ** 2. * (s + 1.)) * (integrate.quad(lambda x: np.real(1. / k(s, gamma, x) * complex(mpmath.ellipk((k(s, gamma, x) ** 2 - 1.) ** .5 / k(s, gamma, x)))), 0., math.acos((1. - s) / 2.))[0] + 2 * integrate.quad(lambda x: np.real(complex(mpmath.ellipk((1 - k(s, gamma, x) ** 2) ** .5))), math.acos((1. - s) / 2.), math.acos((-1 * s) ** .5))[0] + 2 * integrate.quad(lambda x: np.real(1. / (1. - k(s, gamma, x) ** 2) ** .5 * complex(mpmath.ellipk(1. / (1 - k(s, gamma, x) ** 2) ** .5))), math.acos((-1 * s) ** .5), math.pi / 2)[0])
# #print(k(s, gamma, 1.5)) #* complex(mpmath.ellipk(1. / (1 - k(s, gamma, 1.5) ** 2) ** .5))))
# #print(complex(s + math.cos(1.5) ** 2))
# plt.scatter(s, np.real(G_i), color="red")
# plt.axis([-4, 6, -.8, 2.8])
# plt.show()
| mit |
biokit/biokit | biokit/viz/boxplot.py | 1 | 1261 | import pandas as pd
import pylab
class Boxplot(object):
def __init__(self, data):
# try to coerce data into a DataFrame; if that fails, keep the input as-is
try:
self.df = pd.DataFrame(data)
except Exception:
self.df = data
self.xmax = self.df.shape[1]
self.X = None
def plot(self, color_line='r', bgcolor='grey', color='yellow', lw=4,
hold=False, ax=None):
xmax = self.xmax + 1
if ax:
pylab.sca(ax)
pylab.fill_between([0,xmax], [0,0], [20,20], color='red', alpha=0.3)
pylab.fill_between([0,xmax], [20,20], [30,30], color='orange', alpha=0.3)
pylab.fill_between([0,xmax], [30,30], [41,41], color='green', alpha=0.3)
if self.X is None:
X = range(1, self.xmax + 1)
pylab.fill_between(X,
self.df.mean()+self.df.std(),
self.df.mean()-self.df.std(),
color=color, interpolate=False)
pylab.plot(X, self.df.mean(), color=color_line, lw=lw)
pylab.ylim([0, 41])
pylab.xlim([0, self.xmax+1])
pylab.title("Quality scores across all bases")
pylab.xlabel("Position in read (bp)")
pylab.ylabel("Quality")
pylab.grid(axis='x')
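# Illustrative use (made-up quality scores, not shipped with the package):
# Boxplot([[30, 32, 28], [31, 33, 27], [29, 35, 26]]).plot()
# shades the red/orange/green score bands and overlays the column-wise mean
# +/- one standard deviation across read positions.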
| bsd-2-clause |
henningjp/CoolProp | dev/scripts/ECS_fitter.py | 2 | 8496 | from __future__ import print_function
from math import sqrt, exp
import CoolProp
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
from math import log
def viscosity_dilute(fluid, T, e_k, sigma):
"""
T in [K], e_K in [K], sigma in [nm]
viscosity returned is in [Pa-s]
"""
Tstar = T / e_k
molemass = CoolProp.CoolProp.PropsSI(fluid, 'molemass') * 1000
# From Neufeld, 1972, Journal of Chemical Physics - checked coefficients
OMEGA_2_2 = 1.16145 * pow(Tstar, -0.14874) + 0.52487 * exp(-0.77320 * Tstar) + 2.16178 * exp(-2.43787 * Tstar)
# Using the leading constant from McLinden, 2000 since the leading term from Huber 2003 gives crazy values
eta_star = 26.692e-3 * sqrt(molemass * T) / (pow(sigma, 2) * OMEGA_2_2) / 1e6
return eta_star
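# Illustrative call (uses the R1234yf Lennard-Jones parameters listed further
# below, e_k = 281.14 K and sigma = 0.5328 nm):
# eta0 = viscosity_dilute('R1234yf', 300.0, 281.14, 0.5328) # dilute-gas viscosity in Pa-s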
def get_psi(fluid, ref_fluid, eta, T, rhomolar, e_k, sigma_nm):
THIS = CoolProp.AbstractState('HEOS', fluid)
REF = CoolProp.AbstractState('HEOS', ref_fluid)
THIS.update(CoolProp.DmolarT_INPUTS, rhomolar, T)
def residual_for_psi(psi, REF):
# Calculate the conformal state
conformal_state = THIS.conformal_state(ref_fluid, -1, -1)
# Calculate ESRR (which are based on the CONFORMAL state values)
f = THIS.T() / conformal_state['T'];
h = conformal_state['rhomolar'] / THIS.rhomolar(); # Must be the ratio of MOLAR densities!!
# The F factor
F_eta = sqrt(f) * pow(h, -2.0 / 3.0) * sqrt(THIS.molar_mass() / REF.molar_mass());
# Dilute viscosity of fluid of interest
eta_dilute = viscosity_dilute(fluid, T, e_k, sigma_nm)
# Required background contribution from reference fluid
viscosity_background_required = (eta - eta_dilute) / F_eta
REF.update(CoolProp.DmolarT_INPUTS, conformal_state['rhomolar'] * psi, conformal_state['T'])
visc_ref = REF.viscosity_contributions()
residual = visc_ref['initial_density'] + visc_ref['residual']
return residual - viscosity_background_required
psi = scipy.optimize.newton(residual_for_psi, 1.0, args=(REF,))
return psi
def arrayize(*args):
return [np.array(a) for a in args]
def HFO():
# Data from Zhao et al. dx.doi.org/10.1021/je5001457 | J. Chem. Eng. Data 2014, 59, 1366-1371
data_R1234yf = """293.15 1109.9 32.8 12.04 0.1442 6.82
303.09 1073.5 43.6 12.53 0.1319 5.71
313.20 1033.6 57.8 13.16 0.1223 4.60
323.19 990.2 76.0 13.88 0.1126 3.55
333.14 941.4 99.7 14.82 0.1016 2.55
343.11 883.5 132.2 16.12 0.0899 1.64
353.08 809.6 179.9 18.17 0.0820 0.81
358.05 761.5 214.8 19.78 0.0770 0.46
363.05 695.7 267.7 22.44 0.0700 0.15
365.05 657.4 301.0 24.26 0.0624 0.05"""
# Data from Zhao et al. dx.doi.org/10.1021/je5001457 | J. Chem. Eng. Data 2014, 59, 1366-1371
data_R1234zeE = """295.23 1172.5 24.1 12.11 0.1776 8.88
303.19 1146.1 30.6 12.46 0.1607 7.91
313.21 1111.1 40.8 12.93 0.1429 6.66
323.19 1073.6 53.6 13.46 0.1319 5.48
333.00 1033.3 69.8 14.06 0.1193 4.36
343.05 986.7 91.3 14.82 0.1132 3.30
353.00 924.0 119.7 15.80 0.1051 2.26
363.12 866.8 160.4 17.28 0.0924 1.35
373.14 776.9 225.2 19.89 0.0817 0.54"""
for fluid, data, e_k, sigma_nm in zip(['R1234yf', 'R1234ze(E)'], [data_R1234yf, data_R1234zeE], [281.14, 292.11], [0.5328, 0.5017]):
xx, yy, RHO, ETA, ETACP, ETARP = [], [], [], [], [], []
for line in data.split('\n'):
T, rhoL, rhoV, etaV, nuL, sigma = line.strip().split(' ')
rhoL = float(rhoL)
T = float(T)
nuL = float(nuL)
rhomolar = rhoL / CoolProp.CoolProp.PropsSI(fluid, 'molemass')
eta = nuL / 1000**2 * rhoL
psi = get_psi(fluid, 'Propane', eta, T, rhomolar, e_k, sigma_nm)
xx.append(T)
yy.append(psi)
RHO.append(rhomolar)
ETA.append(eta)
ETACP.append(CoolProp.CoolProp.PropsSI('V', 'T', T, 'Q', 0, fluid))
ETARP.append(CoolProp.CoolProp.PropsSI('V', 'T', T, 'Q', 0, 'REFPROP::' + CoolProp.CoolProp.get_fluid_param_string(fluid, 'REFPROP_name')))
RHO, xx, ETA, ETACP, ETARP = arrayize(RHO, xx, ETA, ETACP, ETARP)
rhor = RHO / CoolProp.CoolProp.PropsSI(fluid, 'rhomolar_critical')
plt.plot(rhor, yy, 'o-', label='from experimental data')
p = np.polyfit(rhor, yy, 2)
print(p[::-1])
plt.plot(rhor, np.polyval(p, rhor), 'o-', label='from correlation')
plt.xlabel(r'$\rho_r$')
plt.ylabel('$\psi$')
plt.legend(loc='best')
plt.show()
plt.title(fluid)
plt.plot(xx, (ETACP / ETA - 1) * 100, '^', label='CoolProp')
plt.plot(xx, (ETARP / ETA - 1) * 100, 'o', label='REFPROP')
plt.xlabel('Temperature (K)')
plt.ylabel('$100\\times(\eta_{calc}/\eta_{exp}-1)$ (%)')
plt.legend(loc='best')
plt.savefig(fluid + '_deviation.pdf')
plt.show()
def pentanes():
# from doi 10.1021/je0202174 | J. Chem. Eng. Data 2003, 48, 1418-1421
# T (K), rhoL (kg/m^3), rhoV (kg/m^3), eta (mPa-s)
data_cyclopentane = """253.15 258.15 263.15 268.15 273.15 278.15 283.15 288.15 293.15 298.15 303.15 308.15 313.15 318.15 323.15 328.15 333.15 338.15 343.15 348.15 353.15
784.64 779.53 774.59 769.77 765.12 760.20 755.32 750.27 745.02 738.63 731.97 725.15 718.32 711.59 705.11 699.08 693.40 688.44 684.25 680.96 678.71
0.0881 0.1127 0.1443 0.1848 0.2368 0.3036 0.3894 0.4999 0.6421 0.8255 1.062 1.368 1.764 2.279 2.950 3.827 4.980 6.509 8.554 11.33 15.20
0.7268 0.6786 0.6347 0.5930 0.5567 0.5224 0.4922 0.4646 0.4382 0.4148 0.3923 0.3714 0.3521 0.3350 0.3190 0.3048 0.2912 0.2793 0.2690 0.2590 0.2502"""
# from doi 10.1021/je0202174 | J. Chem. Eng. Data 2003, 48, 1418-1421
# T (K), rhoL (kg/m^3), rhoV (kg/m^3), eta (mPa-s)
data_isopentane = """253.15 258.15 263.15 268.15 273.15 278.15 283.15 288.15 293.15 298.15 303.15 308.15 313.15 318.15 323.15 328.15 333.15 338.15 343.15 348.15 353.15
658.32 653.55 648.73 643.87 639.01 634.15 629.35 624.63 620.05 615.69 610.87 605.63 600.05 594.23 588.24 582.18 576.13 570.18 564.41 558.92 553.79
0.4655 0.5889 0.7372 0.9137 1.122 1.366 1.650 1.979 2.356 2.788 3.278 3.833 4.459 5.162 5.949 6.827 7.803 8.886 10.09 11.41 12.87
0.3893 0.3661 0.3439 0.3201 0.3023 0.2859 0.2703 0.2547 0.2399 0.2289 0.2144 0.2023 0.1910 0.1813 0.1724 0.1611 0.1543 0.1480 0.1411 0.1332 0.1287"""
fluid = ''
def undelimit(args, delim=''):
return [np.array([float(_) for _ in a.strip().split(delim)]) for a in args]
from CoolProp.CoolProp import PropsSI
for fluid, data, e_k, sigma_nm in zip(['CycloPentane', 'Isopentane'], [data_cyclopentane, data_isopentane], [406.33, 341.06], [0.518, 0.56232]):
xx, yy, RHO, ETA, ETACP, ETARP = [], [], [], [], [], []
for _T, _rhoLmass, _rhoVmass, _eta_mPas in zip(*undelimit(data.split('\n'), delim=' ')):
MM = PropsSI('molemass', fluid)
rhomolar = _rhoLmass / MM
eta = _eta_mPas / 1000
psi = get_psi(fluid, 'Propane', eta, _T, rhomolar, e_k, sigma_nm)
xx.append(_T)
yy.append(psi)
RHO.append(rhomolar)
try:
ETACP.append(CoolProp.CoolProp.PropsSI('V', 'T', _T, 'Q', 0, fluid))
except:
ETACP.append(np.nan)
ETA.append(eta)
ETARP.append(CoolProp.CoolProp.PropsSI('V', 'T', _T, 'Q', 0, 'REFPROP::' + CoolProp.CoolProp.get_fluid_param_string(fluid, 'REFPROP_name')))
xx, yy, ETACP, ETARP, RHO, = arrayize(xx, yy, ETACP, ETARP, RHO)
rhored = CoolProp.CoolProp.PropsSI(fluid, 'rhomolar_critical')
print('rhored', rhored)
rhor = np.array(RHO) / rhored
plt.title(fluid)
plt.plot(rhor, yy, 'o-', label='from experimental data')
p = np.polyfit(rhor, yy, 2)
print(p[::-1])
plt.plot(rhor, np.polyval(p, rhor), 'o-', label='from correlation')
plt.xlabel(r'$\rho_r$')
plt.ylabel('$\psi$')
plt.legend(loc='best')
plt.show()
plt.title(fluid)
plt.plot(xx, (ETACP / ETA - 1) * 100, '^', label='CoolProp')
plt.plot(xx, (ETARP / ETA - 1) * 100, 'o', label='REFPROP')
plt.xlabel('Temperature (K)')
plt.ylabel('$100\\times(\eta_{calc}/\eta_{exp}-1)$ (%)')
plt.legend(loc='best')
plt.savefig(fluid + '_deviation.pdf')
plt.show()
HFO()
pentanes()
| mit |
mvdoc/gfusion | gfusion/main.py | 1 | 5942 | """
Densify a known association matrix given similarity matrices for source 1
and source 2.
See Zhang, Ping, Fei Wang, and Jianying Hu. "Towards drug repositioning:
a unified computational framework for integrating multiple aspects of
drug similarity and disease similarity." AMIA Annu Symp Proc. 2014.
"""
import numpy as np
from scipy.spatial.distance import squareform
from sklearn.utils.extmath import squared_norm
from .optimize import simplex_projection
def densify(D, S, R):
"""Densify a matrix R
Parameters
---------
D : np.ndarray (n_similarities, n_features)
flattened upper triangular similarity matrices for source 1
S : np.ndarray (m_similarities, m_features)
flattened upper triangular similarity matrices for source 2
R : np.ndarray (n_features, m_features)
known association matrix between source 1 and source 2
Returns
-------
Theta : np.ndarray (n_features, m_features)
densified association matrix between source 1 and source 2
"""
pass
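# Plausible outline of densify() (a sketch only; the exact update order should
# be checked against Zhang et al. 2014 before implementing):
# lambda1, lambda2, delta1, delta2, omega, pi = _initialize_values(D, S)
# U, V = _initialize_latent_matrices(D, S, omega, pi)
# repeat until convergence:
# L = _solve_lambda(U, V)
# Theta = _solve_theta(U, V, L, R)
# U = _solve_u(Theta, U, V, L, lambda1, D, omega)
# V = _solve_v(Theta, U, V, L, lambda2, S, pi)
# omega = _solve_omega(D, U, delta1)
# pi = _solve_pi(S, V, delta2)
# return Theta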
def _initialize_values(D, S):
"""Initialize values
Parameters
---------
D : np.ndarray (n_similarities, n_features)
similarity matrices for source 1
S : np.ndarray (m_similarities, m_features)
similarity matrices for source 2
Returns
-------
lambda1 : float
lambda2 : float
delta1 : float
delta2 : float
omega : np.ndarray (n_similarities, 1)
pi : np.ndarray (m_similarities, 1)
"""
pass
def _compute_symmetric_nonnegative_factorization(M):
"""
Parameters
----------
M : (n, n) symmetric matrix
Returns
-------
S : (n, n) non negative symmetric
"""
pass
def _initialize_latent_matrices(D, S, omega, pi):
"""Performs Symmetric Nonnegative Matrix Factorization to initialize
the latent matrices U and V
Parameters
---------
D : np.ndarray (n_similarities, n_features)
similarity matrices for source 1
S : np.ndarray (m_similarities, m_features)
similarity matrices for source 2
omega : np.ndarray (n_similarities, 1)
pi : np.ndarray (m_similarities, 1)
Returns
-------
U : np.ndarray (n_features, latent_dimension1)
latent matrix for source 1
V : np.ndarray (m_features, latent_dimension2)
latent matrix for source 2
"""
pass
def _solve_lambda(U, V):
"""Solve lambda and all my problems
Parameters
---------
U : np.ndarray (n_features, latent_dimension1)
latent matrix for source 1
V : np.ndarray (m_features, latent_dimension2)
latent matrix for source 2
Returns
-------
L : np.ndarray (latent_dimension1, latent_dimension2)
aka lambda, something we're trying to optimize
"""
pass
def _solve_theta(U, V, L, R):
"""Solve theta
Parameters
---------
U : np.ndarray (n_features, latent_dimension1)
latent matrix for source 1
V : np.ndarray (m_features, latent_dimension2)
latent matrix for source 2
L : np.ndarray (latent_dimension1, latent_dimension2)
R : np.ndarray (n_features, m_features)
known association matrix between source 1 and source 2
Returns
-------
Theta : np.ndarray (n_features, m_features)
densified association matrix between source 1 and source 2
"""
W = np.dot(np.dot(U, L), V.T)
assert(W.shape == R.shape)
theta = R.copy()
mask = np.where(theta == 0.)
theta[mask] = W[mask]
return theta
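# Note (added for clarity): _solve_theta leaves every known (non-zero) entry of
# R untouched and only fills the zero entries with the low-rank reconstruction
# W = U L V^T. For example, with R = [[1, 0], [0, 0]] only the three zero
# cells are copied from W.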
def _solve_weight_vector(similarities, grouping_matrix, delta):
"""Solve for the weight vector of the similarities, used for
_solve_omega and _solve_pi
Parameters
----------
similarities : np.ndarray (n_similarities,
n_features * (n_features - 1) / 2)
similarity matrices
grouping_matrix : np.ndarray (n_features, n_communities)
delta : float
Returns
-------
weights : np.ndarray (1, n_similarities)
"""
# do some type check
if np.any(similarities < 0):
raise ValueError('similarities contain invalid values (< 0)')
if delta <= 0:
raise ValueError('delta value of {0} not allowed, '
'needs to be > 0'.format(delta))
sigma = np.dot(grouping_matrix, grouping_matrix.T)
n_similarities = len(similarities)
# preallocate vector
a = np.zeros(n_similarities)
for i in range(n_similarities):
a[i] = squared_norm(squareform(similarities[i]) - sigma)
# solve for weight
weight = simplex_projection(a/(2*delta))
return np.atleast_2d(weight)
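# Minimal usage sketch (hypothetical shapes, not from the original tests):
# two flattened 3x3 similarity matrices and a 3x2 grouping matrix.
#
# sims = np.array([squareform(np.ones((3, 3)) - np.eye(3)),
# squareform(0.5 * (np.ones((3, 3)) - np.eye(3)))])
# groups = np.abs(np.random.rand(3, 2))
# weights = _solve_weight_vector(sims, groups, delta=1.0) # shape (1, 2)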
def _solve_omega(D, U, delta1):
"""Solve omega
Parameters
---------
D : np.ndarray (n_similarities, (n_features * (n_features - 1) / 2))
similarity matrices for source 1
U : np.ndarray (n_features, latent_dimension1)
latent matrix for source 1
delta1 : float
Returns
-------
omega : np.ndarray (n_similarities, 1)
"""
return _solve_weight_vector(D, U, delta1)
def _solve_pi(S, V, delta2):
"""Solve pi
Parameters
---------
S : np.ndarray (m_similarities, m_features)
similarity matrices for source 2
V : np.ndarray (m_features, latent_dimension2)
latent matrix for source 2
delta2 : float
Returns
-------
pi : np.ndarray (m_similarities, 1)
"""
return _solve_weight_vector(S, V, delta2)
def _solve_u(Theta, U, V, L, lambda1, D, omega):
"""Solve U
Parameters
---------
TODO
Returns
-------
U : np.ndarray (n_features, latent_dimension1)
latent matrix for source 1
"""
pass
def _solve_v(Theta, U, V, L, lambda2, S, pi):
"""Solve V
Parameters
---------
TODO
Returns
-------
V : np.ndarray (m_features, latent_dimension2)
latent matrix for source 2
"""
pass
| mit |
LIKAIMO/MissionPlanner | Lib/site-packages/numpy/doc/creation.py | 94 | 5411 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or record arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
``>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])``
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
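For example: ::
>>> np.ones((2, 3))
array([[ 1., 1., 1.], [ 1., 1., 1.]])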
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
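For example, a simple function of the grid coordinates can be evaluated
directly: ::
>>> x, y = np.indices((3, 3))
>>> x + y
array([[0, 1, 2], [1, 2, 3], [2, 3, 4]])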
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard
to convert are libraries like PIL (able to read and write many image formats
such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
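A minimal round trip with tofile() and fromfile() looks like this (the file
name is arbitrary): ::
>>> a = np.arange(4, dtype=np.float64)
>>> a.tofile('data.bin')
>>> np.fromfile('data.bin')
array([ 0., 1., 2., 3.])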
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
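For example, a diagonal matrix can be generated with: ::
>>> np.diag([1, 2, 3])
array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])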
"""
| gpl-3.0 |
RayMick/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
ashhher3/scikit-learn | sklearn/neighbors/graph.py | 5 | 5086 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(
n_neighbors, metric=metric, p=p, metric_params=metric_params
).fit(X)
else:
_check_params(X, metric, p, metric_params)
return X.kneighbors_graph(X._fit_X, n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(
radius=radius, metric=metric, p=p,
metric_params=metric_params
).fit(X)
else:
_check_params(X, metric, p, metric_params)
return X.radius_neighbors_graph(X._fit_X, radius, mode)
| bsd-3-clause |
afgaron/rgz-analysis | python/expert_all.py | 2 | 37623 | # import necessary python packages
import numpy as np
import datetime
import os,sys
import urllib
import cStringIO
import json
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
import dateutil.parser
from matplotlib import pyplot as plt
from matplotlib import cm
from collections import Counter
from pymongo import MongoClient
from scipy import stats
from PIL import Image
from collections import OrderedDict
from scipy.linalg.basic import LinAlgError
import matplotlib.patches as patches
from matplotlib.path import Path
import requests
from StringIO import StringIO
#------------------------------------------------------------------------------------------------------------
# Setup path locations
rgz_dir = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
csv_dir = '%s/csv' % rgz_dir
plot_dir = '%s/plots/expert' % rgz_dir
if not os.path.isdir(plot_dir):
os.mkdir(plot_dir)
dat_dir = '%s/datfiles/expert/expert_all' % rgz_dir
if not os.path.isdir(dat_dir):
os.mkdir(dat_dir)
# Set constants
beta_release_date = datetime.datetime(2013, 10, 20, 12, 0, 0, 0) # date of beta release (YYY,MM,DD,HH,MM,SS,MS)
main_release_date = datetime.datetime(2013, 12, 17, 0, 0, 0, 0)
IMG_HEIGHT = 424.0 # number of pixels in the JPG image along the y axis
IMG_WIDTH = 424.0 # number of pixels in the JPG image along the x axis
FITS_HEIGHT = 301.0 # number of pixels in the FITS image along the y axis
FITS_WIDTH = 301.0 # number of pixels in the FITS image along the x axis
PIXEL_SIZE = 0.00016667#/3600.0 # the number of arcseconds per pixel in the FITS image
xmin = 1.
xmax = IMG_HEIGHT
ymin = 1.
ymax = IMG_WIDTH
xjpg2fits = float(IMG_WIDTH/FITS_WIDTH) # map the JPG pixels to the FITS pixels in x
yjpg2fits = float(IMG_HEIGHT/FITS_HEIGHT) # map the JPG pixels to the FITS pixels in y
bad_keys = ('finished_at','started_at','user_agent','lang')
plt.ion()
def list_flatten(x):
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(list_flatten(el))
else:
result.append(el)
return result
def plot_npeaks(volunteers=False):
if volunteers:
readfile = 'expert/npeaks_ir_expert_volunteer.csv'
writefile = 'expert_all/ir_peaks_histogram_expert_volunteer.png'
else:
readfile = 'expert/npeaks_ir_expert_all.csv'
writefile = 'expert_all/ir_peaks_histogram_expert_all.png'
# Read in data
with open('%s/%s' % (csv_dir,readfile),'rb') as f:
npeaks = [int(line.rstrip()) for line in f]
# Plot the distribution of the total number of IR sources per image
fig = plt.figure(figsize=(8,7))
ax1 = fig.add_subplot(111)
h = plt.hist(npeaks,bins=np.arange(np.max(npeaks)+1),axes=ax1)
ax1.set_title('RGZ source distribution')
ax1.set_xlabel('Number of IR peaks per image')
ax1.set_ylabel('Count')
#fig.show()
fig.tight_layout()
# Save hard copy of the figure
fig.savefig('%s/%s' % (plot_dir,writefile))
plt.close()
return None
def find_ir_peak(x,y):
# Perform a kernel density estimate on the data:
X, Y = np.mgrid[xmin:xmax, ymin:ymax]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
# Find the number of peaks
# http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
#neighborhood = generate_binary_structure(2,2)
neighborhood = np.ones((10,10))
local_max = maximum_filter(Z, footprint=neighborhood)==Z
background = (Z==0)
eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
detected_peaks = local_max - eroded_background
npeaks = detected_peaks.sum()
return X,Y,Z,npeaks
def plot_image(ir_x,ir_y,sub,X,Y,Z,npeaks,all_radio,radio_unique,volunteers=False):
if volunteers:
writefile = 'expert_volunteers/ir_peaks/%s_ir_peak.png' % sub['zooniverse_id']
else:
writefile = 'expert_all/ir_peaks/%s_ir_peak.png' % sub['zooniverse_id']
# Plot the infrared results
fig = plt.figure(1,(15,4))
ax3 = fig.add_subplot(143)
assert len(ir_x) == len(ir_y), \
'Length of IR x- and y-vectors must be the same'
if len(ir_x) > 2:
# Find the peak
xpeak = X[Z==Z.max()][0]
ypeak = Y[Z==Z.max()][0]
# Plot the KDE map
ax3.imshow(np.rot90(Z), cmap=plt.cm.hot_r,extent=[xmin, xmax, ymin, ymax])
# Plot the individual sources
if len(ir_x) > 2: # At least 3 non-singular IR points means that the KDE map can be generated and peaks estimated
ax3.text(50,40,r'Main IR peak: $(%i,%i)$' % (xpeak,ypeak),color='k',fontsize=12)
ax3.text(50,70,r'$N_{IR\/peaks}$ = %i' % npeaks,color='k',fontsize=12)
ax3.plot(ir_x, ir_y, 'go', markersize=4)
ax3.plot([xpeak],[ypeak],'c*',markersize=12)
elif len(ir_x) == 2: # 2 IR points are simply plotted, with no KDE estimation
ax3.text(50,40,r'IR peaks: $(%i,%i),(%i,%i)$' % (ir_x[0],ir_y[0],ir_x[1],ir_y[1]),color='k',fontsize=12)
ax3.text(50,70,r'$N_{IR\/peaks}$ = 2',color='k',fontsize=12)
ax3.plot(ir_x,ir_y,'c*',markersize=12)
elif len(ir_x) == 1: # 1 IR point is simply plotted, with no KDE estimation
print ir_x,ir_y
ax3.text(50,40,r'IR peak: $(%i,%i)$' % (ir_x[0],ir_y[0]),color='k',fontsize=12)
ax3.text(50,70,r'$N_{IR\/peaks}$ = 1',color='k',fontsize=12)
ax3.plot(ir_x,ir_y,'c*',markersize=12)
if len(ir_x) == 0: # 0 IR points identified by any user
ax3.text(50,70,'No IR sources',color='k',fontsize=12)
# Plot the radio counts
radio_flattened = [item for sublist in all_radio for item in sublist]
uniques = set(radio_flattened)
d = dict(zip(uniques,np.arange(len(uniques))))
c = Counter(all_radio)
cmc = c.most_common()[::-1]
# Sort by number of components?
for idx,(c_xval,n) in enumerate(cmc):
if len(c_xval) > 1:
tlist = [str(d[x]) for x in c_xval]
t = ' and R'.join(sorted(tlist))
else:
t = d[c_xval[0]]
singular = 's' if n != 1 else ''
ax3.text(550,400-idx*25,'%3i vote%s: R%s' % (n,singular,t),fontsize=11)
# Download contour data
r = requests.get(sub['location']['contours'])
contours = r.json()
sf_x = 500./contours['width']
sf_y = 500./contours['height']
verts_all = []
codes_all = []
components = contours['contours']
for comp in components:
for idx,level in enumerate(comp):
verts = [((p['x'])*sf_x,(p['y']-1)*sf_y) for p in level['arr']]
codes = np.ones(len(verts),int) * Path.LINETO
codes[0] = Path.MOVETO
verts_all.extend(verts)
codes_all.extend(codes)
path = Path(verts_all, codes_all)
patch_black = patches.PathPatch(path, facecolor = 'none', edgecolor='black', lw=1)
# Scaling factor for FITS to radio files
radio_ir_scaling_factor = 500./132
# Rectangle showing the radio box size
box_counts = Counter(radio_flattened)
for ru in radio_unique:
x0,x1,y0,y1 = [float(ru_) * radio_ir_scaling_factor for ru_ in ru]
# Assume xmax matching is still good
xmax_index = '%.6f' % float(ru[1])
component_number = d[xmax_index]
number_votes = box_counts[xmax_index]
rectangle = plt.Rectangle((x0,y0), x1-x0, y1-y0, fill=False, linewidth=number_votes/5., edgecolor = 'c')
ax3.add_patch(rectangle)
ax3.text(x0-15,y0-15,'R%s' % component_number)
ax3.set_xlim([0, 500])
ax3.set_ylim([500, 0])
ax3.set_title('%s \n %s' % (sub['zooniverse_id'],sub['metadata']['source']))
ax3.set_aspect('equal')
# Display IR and radio images
url_standard = sub['location']['standard']
im_standard = Image.open(cStringIO.StringIO(urllib.urlopen(url_standard).read()))
ax1 = fig.add_subplot(141)
ax1.imshow(im_standard,origin='upper')
ax1.add_patch(patch_black)
ax1.set_title('WISE')
url_radio = sub['location']['radio']
im_radio = Image.open(cStringIO.StringIO(urllib.urlopen(url_radio).read()))
ax2 = fig.add_subplot(142)
ax2.imshow(im_radio,origin='upper')
ax2.set_title('FIRST')
if volunteers:
ax2.set_xlabel('volunteers')
else:
ax2.set_xlabel('experts')
# Save hard copy of the figure
fig.savefig('%s/%s' % (plot_dir,writefile))
# Close figure after it's done; otherwise mpl complains about having thousands of stuff open
plt.close()
return None
def find_consensus(sub,classifications,verbose=False,volunteers=False):
Nclass = sub["classification_count"] # number of classifications made per image
srcid = sub["metadata"]["source"] # determine the image source id
imgid = sub["_id"] # grab the ObjectId corresponding for this image
# locate all the classifications of this image by either the experts or volunteers
if volunteers:
user_classifications = classifications.find({"subject_ids": imgid, 'expert':{'$exists':False}})
Nusers = classifications.find({"subject_ids": imgid, 'expert':{'$exists':False}}).count()
prefix = 'volunteer'
else:
user_classifications = classifications.find({"subject_ids": imgid, 'expert':True})
Nusers = classifications.find({"subject_ids": imgid, 'expert':True}).count()
prefix = 'expert'
# loop over the number of classifications
if Nusers > 0: # the number of classifications should equal the number of users who classified
classfile2 = open('%s/RGZ-%s-%s-classifications.txt' % (dat_dir,prefix,srcid), 'w')
# initialise coordinate variables
radio_ra = []
radio_dec = []
radio_x = []
radio_y = []
radio_w = []
radio_h = []
ir_ra = []
ir_dec = []
ir_radius = []
ir_x = []
ir_y = []
radio_comp = []
ir_comp = []
all_radio = []
all_radio_markings = []
Nuser_id = 0 # User id number
#---------------------------------------------------------------------------------------------------------------------
#---START: loop through the users who classified the image
for classification in list(user_classifications):
compid = 0 # Component id per image
rclass = classification["annotations"] # For now, analyze only the first set of continuous regions selected.
# Note that last two fields in annotations are timestamp and user_agent
Nuser_id += 1 # Increase the number of users who classified by 1.
#-------------------------------------------------------------------------------------------------------------------
#---START: loop through the keys in the annotation array, making sure that a classification has been made
for ann in rclass:
if ann.has_key('started_at') or ann.has_key('finished_at') or ann.has_key('user_agent') or ann.has_key('lang'):
continue
Nradio = 0 # counter for the number of radio components per classification
Nir = 0 # counter for the number of IR components per classification
if (ann.has_key('radio') and ann['radio'] != 'No Contours'): # get the radio annotations
radio = ann["radio"]
Nradio = len(radio) # count the number of radio components per classification
'''
print 'RADIO:'
print radio
'''
compid += 1 # we have a radio source - all components will be id with this number
list_radio = []
#---------------------------------------------------------------------------------------------------------------
#---START: loop through number of radio components in user classification
for rr in radio:
radio_marking = radio[rr]
# Find the location and size of the radio box in pixels
list_radio.append('%.6f' % float(radio_marking['xmax']))
all_radio_markings.append(radio_marking)
print >> classfile2, Nuser_id, compid,'RADIO', radio_marking['xmin'], radio_marking['xmax'], radio_marking['ymin'], radio_marking['ymax']
all_radio.append(tuple(sorted(list_radio)))
#---END: loop through number of radio components in user classification
#---------------------------------------------------------------------------------------------------------------
# get IR counterpart
irkey = ann.has_key('ir')
ir_nosources = True if (irkey and ann['ir'] == 'No Sources') else False
if (irkey and not ir_nosources): # get the infrared annotation for the radio classification.
ir = ann["ir"]
Nir = 1 #len(ir) # number of IR counterparts.
'''
print 'IR:'
print ir
'''
#exit()
#jj = 0
for ii in ir:
ir_marking = ir[ii]
# write to annotation file
print >> classfile2, Nuser_id, compid, 'IR', float(ir_marking['x']), float(ir_marking['y'])
ir_x.append(float(ir_marking['x']))
ir_y.append(float(ir_marking['y']))
else: # user did not classify an infrared source
Nir = 0
xir = -99.
yir = -99.
radiusir = -99.
print >> classfile2, Nuser_id, compid, 'IR', xir, yir
else: # user did not classify a radio source
Nradio = 0
Nir = 0
# there should always be a radio source, bug in program if we reach this part.
if not ann.has_key('radio'):
print >> classfile2,'%i No radio source - error in processing on image %s' % (Nuser_id, srcid)
elif ann['radio'] == 'No Contours':
print >> classfile2,'%i No radio source labeled by user for image %s' % (Nuser_id,srcid)
else:
print >> classfile2,'Unknown error processing radio source'
radio_comp.append( Nradio ) # add the number of radio components per user source to array.
ir_comp.append( Nir ) # add the number of IR counterparts per user soruce to array.
#---END: loop through the users who classified the image
#---------------------------------------------------------------------------------------------------------------------
# Process the radio markings into unique components
rlist = [(rr['xmin'],rr['xmax'],rr['ymin'],rr['ymax']) for rr in all_radio_markings]
if len(rlist) > 0:
radio_unique = [rlist[0]]
if len(all_radio_markings) > 1:
for rr in rlist[1:]:
if rr not in radio_unique:
radio_unique.append(rr)
nr = False
else:
nr = True
radio_unique = [(0,0,0,0)]
# Use a 2-D Gaussian kernel to find the center of the IR sources and plot the analysis images
if len(ir_x) > 2:
try:
xpeak,ypeak,Z,npeaks = find_ir_peak(ir_x,ir_y)
plot_image(ir_x,ir_y,sub,xpeak,ypeak,Z,npeaks,all_radio,radio_unique,volunteers=volunteers)
except LinAlgError:
npeaks = len(ir_x)
else:
print 'Length of IR vector was less than 2'
npeaks = len(ir_x)
xpeak,ypeak,Z = np.zeros((423,423)),np.zeros((423,423)),np.zeros((423,423))
plot_image(ir_x,ir_y,sub,xpeak,ypeak,Z,npeaks,all_radio,radio_unique,volunteers=volunteers)
'''
# Plot analysis images
npeaks = len(ir_x)
if npeaks == 0:
ir_x,ir_y = [0],[0]
plot_image(ir_x[0],ir_y[0],npeaks,sub,all_radio,radio_unique,no_radio = nr)
'''
# calculate the median number of components for both IR and radio for each object in image.
radio_med = np.median(radio_comp) # median number of radio components
Ncomp_radio = np.size(np.where(radio_comp == radio_med)) # number of classifications = median number
ir_med = np.median(ir_comp) # median number of infrared components
Ncomp_ir = np.size(np.where(ir_comp == ir_med)) # number of classifications = median number
if verbose:
print ' '
print 'Source............................................................................................: %s' % srcid
print 'Number of %s users who classified the object..................................................: %d' % (prefix,Nusers)
print '................'
print 'Number of %s users who classified the radio source with the median value of radio components..: %d' % (prefix,Ncomp_radio)
print 'Median number of radio components per %s user.................................................: %f' % (prefix,radio_med)
print 'Number of %s users who classified the IR source with the median value of IR components........: %d' % (prefix,Ncomp_ir)
print 'Median number of IR components per %s user....................................................: %f' % (prefix,ir_med)
print ' '
classfile2.close()
else:
print '%ss did not classify subject %s.' % (prefix.capitalize(),sub['zooniverse_id'])
ir_x,ir_y = 0,0
return None
def load_rgz_data():
# Connect to Mongo database
# Make sure to run mongorestore /path/to/database to restore the updated files
# mongod client must be running locally
client = MongoClient('localhost', 27017)
db = client['radio']
subjects = db['radio_subjects'] # subjects = images
classifications = db['radio_classifications'] # classifications = classifications of each subject per user
return subjects,classifications
def load_expert_parameters():
expert_path = '%s/expert' % rgz_dir
# Note all times should be in UTC (Zulu)
json_data = open('%s/expert_params.json' % expert_path).read()
experts = json.loads(json_data)
return experts
def run_expert_sample(subjects,classifications):
expert_zid = open('%s/expert/expert_all_zooniverse_ids.txt' % rgz_dir).read().splitlines()
N = 0
with open('%s/expert/npeaks_ir_expert_all.csv' % (csv_dir),'wb') as f:
for sub in list(subjects.find({'zooniverse_id':{'$in':expert_zid},'classification_count':{'$gt':0}})):
Nclass = sub["classification_count"] # number of classifications made per image
if Nclass > 0: # if no classifications move to next image
npeak = find_consensus(sub,classifications)
print >> f, npeak
N += 1
# Check progress by printing to screen every 10 classifications
if not N % 10:
print N, datetime.datetime.now().strftime('%H:%M:%S.%f')
return None
def run_volunteer_sample(subjects,classifications):
expert_zid = open('%s/expert/expert_all_zooniverse_ids.txt' % rgz_dir).read().splitlines()
N = 0
with open('%s/expert/npeaks_ir_expert_volunteer.csv' % (csv_dir),'wb') as f:
for sub in list(subjects.find({'zooniverse_id':{'$in':expert_zid},'classification_count':{'$gt':0}})):
Nclass = sub["classification_count"] # number of classifications made per image
if Nclass > 0: # if no classifications move to next image
npeak = find_consensus(sub,classifications,volunteers=True)
print >> f, npeak
N += 1
# Check progress by printing to screen every 10 classifications
if not N % 10:
print N, datetime.datetime.now().strftime('%H:%M:%S.%f')
return None
def sdi(data):
# Shannon diversity index
def p(n, N):
""" Relative abundance """
if n == 0:
return 0
else:
return (float(n)/N) * np.log(float(n)/N)
N = sum(data.values())
return -sum(p(n, N) for n in data.values() if n != 0)
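# Worked example (illustrative counts, not from the RGZ data): for
# data = {1: 5, 2: 5} both relative abundances are 0.5, so
# sdi(data) = -(0.5*ln(0.5) + 0.5*ln(0.5)) = ln(2) ~ 0.693; dividing by
# np.log(len(data)) = ln(2), as done below, normalises the index to 1.0.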
def compare_expert_consensus():
with open('%s/expert/expert_all_first_ids.txt' % rgz_dir,'rb') as f:
first_ids = [line.rstrip() for line in f]
exlist = load_expert_parameters()
ir_array = []
ex_array = []
for first in first_ids:
ir_temp = []
exlist_arr = []
for ex in exlist:
with open('%s/datfiles/expert/%s/RGZBETA2-%s-classifications.txt' % (rgz_dir,ex['expert_user'],first)) as f:
x = [line.rstrip() for line in f]
try:
last_line = x[-1].split()
n_ir = int(last_line[1])
x_ir = last_line[3]
if x_ir == '-99.0':
n_ir = 0
ir_temp.append(n_ir)
exlist_arr.append(ex['expert_user'])
except:
pass
ex_array.append(exlist_arr)
ir_array.append(ir_temp)
'''
# Plot number of users per galaxy
excount = [len(x) for x in ex_array]
fig2 = plt.figure(2)
ax2 = fig2.add_subplot(111)
ax2.hist(excount,bins=6,range=(5,11))
ax2.set_xlim(6,11)
fig2.show()
fig2.tight_layout()
'''
c = [Counter(i) for i in ir_array]
fig = plt.figure(1,(15,4))
ax = fig.add_subplot(111)
larr = []
varr = []
sdi_arr = []
for idx,cc in enumerate(c):
l,v = zip(*cc.items())
larr.append(list(l))
varr.append(list(v))
if len(l) > 1:
sdi_arr.append(sdi(cc)/np.log(len(l)))
else:
sdi_arr.append(sdi(cc))
iarr = []
sarr = []
for idx,(l,s) in enumerate(zip(larr,sdi_arr)):
iarr.append(np.zeros(len(l),dtype=int)+idx)
sarr.append(np.zeros(len(l),dtype=float)+s)
iarr = list_flatten(iarr)
larr = list_flatten(larr)
varr = list_flatten(varr)
sarr = list_flatten(sarr)
zipped = zip(sarr,larr,iarr,varr)
zipped.sort()
sarr,larr,iarr,varr = zip(*zipped)
ikeys = list(OrderedDict.fromkeys(iarr))
inew = [ikeys.index(ii) for ii in iarr]
sc = ax.scatter(inew,larr,c=sarr,s=((np.array(varr)+5)**2),edgecolor='k',cmap = cm.RdBu_r,vmin=0.,vmax =1.0)
cbar = plt.colorbar(sc)
cbar.set_label('Normalized Shannon entropy')
ax.set_xlim(-1,101)
ax.set_xlabel('Galaxy')
ax.set_ylabel('Number of IR sources')
ax.set_aspect('auto')
lnp = np.array(larr)
inp = np.array(inew)
for i in inp:
if (inp == i).sum() > 1:
lmin = np.min(lnp[inp == i])
lmax = np.max(lnp[inp == i])
ax.plot([i,i],[lmin,lmax],color='black')
fig.show()
fig.tight_layout()
# Now analyzing the radio components. Order can definitely change, even when associated with a single IR source.
# Sort first? Sort first on second column, then radio? That would make sure everything agrees...
'''
OK. So, Kevin doesn't have any classifications in the set recorded. There are 134 classifications by his IP address in the timeframe
in question, but they're very short (only 18 minutes), and none of them are in the set of 100. Lots of duplicates, too.
Eight users classified every galaxy in the sample of 100.
42jkb, ivywong, enno.middelberg, xDocR, KWillett, stasmanian, akpinska, vrooje
klmasters only shows up for 25 of them (as expected) Galaxies done in timeframe:
Kevin appears in 0 139 - Claims he was logged in, but zero classifications under username Kevin
There are 139 galaxies done by someone matching his IP address,
but none are in the expert sample of 100 (no idea why).
Assume we got no useful classification data from him.
'''
return ir_array
def compare_volunteer_consensus(subjects,classifications):
# Just looks at the total number of IR sources per subject as measured by volunteers.
# Should be able to get this by querying MongoDB directly.
with open('%s/expert/expert_all_zooniverse_ids.txt' % rgz_dir,'rb') as f:
zooniverse_ids = [line.rstrip() for line in f]
# Empty arrays
ir_array = []
usernames_array = []
# Load parameters for the expert science team
experts = load_expert_parameters()
# Loop over each object in the sample of 100
for zid in zooniverse_ids:
ir_temp = []
username_temp = []
# Get all classifications for the subject
subject_id = subjects.find_one({'zooniverse_id':zid})['_id']
clist = classifications.find({'subject_ids.0':subject_id})
# List of classifications whose classifications shouldn't be included. Start with experts.
usernames_bad = [x['expert_user'] for x in experts]
# Loop over all classifications
for c in clist:
# Test if user was logged in
try:
user_name = c['user_name']
if user_name not in usernames_bad:
annotations = c['annotations']
# Record the number of galaxies (that is, IR sources) in image
na = len(annotations)
for a in annotations:
# Don't count metadata
if a.keys()[0] in bad_keys:
na -= 1
# Don't count if source has no sources or contours; likely a glitch in system
if 'ir' in a.keys():
if a['ir'] == 'No Sources' and a['radio'] == 'No Contours':
na -= 1
# Count the total number of galaxies in image recorded by the user
ir_temp.append(na)
username_temp.append(user_name)
# Prevent counts of duplicate classifications by the same user by adding name to the prohibited list
usernames_bad.append(user_name)
'''
else:
print 'Eliminating %s for %s' % (user_name,zid)
'''
# Do not include classifications by anonymous users
except KeyError:
pass
'''
username_temp.append('Anonymous')
annotations = c['annotations']
na = len(annotations)
for a in annotations:
if a.keys()[0] in bad_keys:
na -= 1
if 'ir' in a.keys():
if a['ir'] is 'No Sources' and a['radio'] is 'No Contours':
na -= 1
ir_temp.append(na)
'''
# Append counts to the master arrays
ir_array.append(ir_temp)
usernames_array.append(username_temp)
i_nozeros = []
for ii,zz,uu in zip(ir_array,zooniverse_ids,usernames_array):
if len(ii) > 0:
i_nozeros.append(ii)
else:
print 'No non-expert classifications for %s' % zz,uu
c = [Counter(i) for i in i_nozeros]
fig = plt.figure(2,(15,4))
fig.clf()
ax = fig.add_subplot(111)
larr = []
varr = []
sdi_arr = []
for idx,cc in enumerate(c):
l,v = zip(*cc.items())
larr.append(list(l))
varr.append(list(v))
if len(l) > 1:
sdi_arr.append(sdi(cc)/np.log(len(l)))
else:
sdi_arr.append(sdi(cc))
iarr = []
sarr = []
for idx,(l,s) in enumerate(zip(larr,sdi_arr)):
iarr.append(np.zeros(len(l),dtype=int)+idx)
sarr.append(np.zeros(len(l),dtype=float)+s)
iarr = list_flatten(iarr) # Index of galaxy image
larr = list_flatten(larr) # Number of IR sources
varr = list_flatten(varr) # Number of users who selected the given number of IR sources
sarr = list_flatten(sarr) # Shannon diversity index
zipped = zip(sarr,larr,iarr,varr)
zipped.sort()
sarr,larr,iarr,varr = zip(*zipped)
ikeys = list(OrderedDict.fromkeys(iarr))
inew = [ikeys.index(ii) for ii in iarr]
sc = ax.scatter(inew,larr,c=sarr,s=((np.array(varr)+5)**2),edgecolor='k',cmap = cm.RdBu_r,vmin=0.,vmax =1.0)
cbar = plt.colorbar(sc)
cbar.set_label('Normalized Shannon entropy')
ax.set_xlim(-1,len(c)+1)
ax.set_title('RGZ volunteer classifications')
ax.set_xlabel('Galaxy')
ax.set_ylabel('Number of IR sources')
ax.set_aspect('auto')
lnp = np.array(larr)
inp = np.array(inew)
# Draw black lines between dots
for i in inp:
if (inp == i).sum() > 1:
lmin = np.min(lnp[inp == i])
lmax = np.max(lnp[inp == i])
ax.plot([i,i],[lmin,lmax],color='black')
fig.show()
#fig.tight_layout()
return None
def expert_vs_volunteer():
# Direct comparison of the expert vs. volunteer classifications for all galaxies?
return None
def histogram_experts(classifications):
# DEPRECATED
# As of 7 Feb 2016, RGZ data dumps do not include the users collection.
# Goal: find the distribution and average number of IR sources per image for the science team
do_experts = True
if do_experts:
experts = load_expert_parameters()
# Add Larry's regular username as well as Ray Norris
experts.append({'expert_user':'DocR'})
experts.append({'expert_user':'raynorris'})
expert_avg = []
for ex in experts:
username = ex['expert_user']
classcount = classifications.find({'user_name':username}).count()
if classcount > 0:
c = classifications.find({'user_name':username})
nir = []
for cc in list(c):
annotations = cc['annotations']
na = len(annotations)
for a in annotations:
if a.keys()[0] in bad_keys:
na -= 1
if 'ir' in a.keys():
if a['ir'] == 'No Sources' and a['radio'] == 'No Contours':
na -= 1
nir.append(na)
print '%20s averages %.2f IR sources per image over %i classifications' % (username,np.mean(nir),classcount)
expert_avg.append(np.mean(nir))
print '-----------------------------------'
# Now look at the volunteers
rgz_id = classifications.find_one()['project_id']
# Odd that about half of the users don't seem to have a classification count for RGZ. Is that actually true?
'''
In [80]: users.find({'projects.%s.classification_count' % rgz_id:{'$exists':True}}).count()
Out[80]: 4907
In [79]: users.find({'projects.%s.classification_count' % rgz_id:{'$exists':False}}).count()
Out[79]: 3312
All users with a classification count do have at least one classification.
In the second group, though, most have zero, but some have a couple classifications (maximum of 6)
742 have at least one classification
2570 have no classifications
So we actually have only 4907+742 = 5,649 contributing users, rather than the 8,219 people in the users db and the 4,955 listed on the API
'''
# Concatenate the two groups
users_good = list(users.find({'projects.%s.classification_count' % rgz_id:{'$exists':True}}))
users_unsure = users.find({'projects.%s.classification_count' % rgz_id:{'$exists':False}})
for u in list(users_unsure):
if classifications.find({'user_id':u['_id']}).count() > 0:
users_good.append(u)
nir_username_volunteers = []
nir_avg_volunteers = []
nir_count_volunteers = []
for u in users_good:
classcount = classifications.find({'user_id':u['_id']}).count()
if classcount > 0:
c = classifications.find({'user_id':u['_id']})
nir = []
for cc in list(c):
annotations = cc['annotations']
na = len(annotations)
for a in annotations:
if a.keys()[0] in bad_keys:
na -= 1
if 'ir' in a.keys():
if a['ir'] == 'No Sources' and a['radio'] == 'No Contours':
na -= 1
nir.append(na)
avg = np.mean(nir)
#print '%20s averages %.2f IR sources per image over %i classifications' % (u['name'],avg,classcount)
else: # Shouldn't happen with this list
print 'No classifications found for %s' % u['name']
avg = 0.
nir_username_volunteers.append(u['name'])
nir_avg_volunteers.append(avg)
nir_count_volunteers.append(classcount)
# If we eliminate users who average more than two IR sources per image, how much of the data would that reject?
# counts_lost = np.sum([c for a,b,c in zip(nir_username_volunteers, nir_avg_volunteers, nir_count_volunteers) if b > 2.0])
# Only 413 classifications. Negligible.
return nir_username_volunteers, nir_avg_volunteers, nir_count_volunteers, expert_avg
def plot_histogram_experts(names, avgs, counts, expert_avg):
xpairs = [[x,x] for x in expert_avg]
xlist = []
for xends in xpairs:
xlist.extend(xends)
xlist.append(None)
avg_cutoff = np.linspace(0,4,50)
frac_lost = []
for ac in avg_cutoff:
frac_lost.append(np.sum([c for a,c in zip(avgs,counts) if a > ac])/float(sum(counts)))
# Plot results
fig = plt.figure(1)
fig.clf()
ax1 = fig.add_subplot(221)
ax1.scatter(avgs,counts,color='black',marker='.',s=1,alpha=0.5)
ax1.set_xlabel('Mean IR sources per image')
ax1.set_ylabel('Total number of classifications')
ax1.set_yscale('log')
ax2 = fig.add_subplot(222)
ax2.plot(avg_cutoff,frac_lost,color='green',lw=3)
ax2.set_ylim(-0.02,1.02)
ax2.set_xlabel('Cutoff for IR sources/image')
ax2.set_ylabel('Fraction of data affected')
ax3 = fig.add_subplot(223)
ax3.hist(avgs,bins=np.linspace(0,4,100))
ax3.text(2.5,700,'All',fontsize=16)
ax3.set_xlabel('Mean IR sources per image')
ax3.set_ylabel('Count (users)')
ax4 = fig.add_subplot(224)
ax4.hist(np.array(avgs)[np.array(counts) > 10],bins=np.linspace(0,4,100),color='cyan')
ax4.text(2.5,250,r'$N_{class}>10$',fontsize=16)
ax4.set_xlabel('Mean IR sources per image')
ax4.set_ylabel('Count (users)')
ax4.set_xlim(ax3.get_xlim())
for ax in (ax2,ax3,ax4):
ypairs = [ax.get_ylim() for x in range(len(xpairs))]
ylist = []
for yends in ypairs:
ylist.extend(yends)
ylist.append(None)
ax.plot(xlist,ylist,color='red',alpha=0.5)
fig.show()
# Save hard copy of the figure
fig.savefig('%s/plots/histogram_avg_ir_sources.png' % rgz_dir)
return None
def update_experts(classifications,experts):
for ex in experts:
expert_dates = (dateutil.parser.parse(ex['started_at']),dateutil.parser.parse(ex['ended_at']))
classifications.update({"updated_at": {"$gt": expert_dates[0],"$lt":expert_dates[1]},"user_name":ex['expert_user']},{'$set':{'expert':True}},multi=True)
return None
########################################
# Call program from the command line
########################################
if __name__ == '__main__':
plt.ioff()
subjects,classifications = load_rgz_data()
experts = load_expert_parameters()
update_experts(classifications,experts)
# Experts on sample of 100
run_expert_sample(subjects,classifications)
#plot_npeaks()
# Volunteers on sample of 100
run_volunteer_sample(subjects,classifications)
#plot_npeaks(volunteers=True)
| mit |
m11s/MissionPlanner | Lib/site-packages/scipy/signal/fir_filter_design.py | 53 | 18572 | """Functions for FIR filter design."""
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
import sigtools
# Some notes on function parameters:
#
# `cutoff` and `width` are given as numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2 kHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
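# For instance (illustrative only): with an 8 kHz sampling rate the Nyquist rate is
# 4 kHz, so a 100 Hz transition band corresponds to width = 100/4000 = 0.025, and a
# 21-tap filter has order 20.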
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21)**0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
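# Example values (sketch): kaiser_beta(65) = 0.1102*(65 - 8.7) ~= 6.20, and
# kaiser_beta(22) = 0.5842*1**0.4 + 0.07886*1 ~= 0.66; attenuations of 21 dB or less give 0.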
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
N : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and stopband
(or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
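# Example (sketch): kaiser_atten(81, 0.05) = 2.285*80*pi*0.05 + 7.95 ~= 36.7 dB.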
def kaiserord(ripple, width):
"""Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
ripple in stopband.
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta :
The beta parameter for the kaiser window.
Notes
-----
There are several ways to obtain the Kaiser window:
signal.kaiser(numtaps, beta, sym=0)
signal.get_window(beta, numtaps)
signal.get_window(('kaiser', beta), numtaps)
The empirical equations discovered by Kaiser are used.
See Also
--------
kaiser_beta, kaiser_atten
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
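# Example (sketch): kaiserord(ripple=65, width=0.05) evaluates to roughly
# numtaps = ceil((65 - 7.95)/2.285/(pi*0.05) + 1) = 160 with beta ~= 6.20.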
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response filter.
The filter will have linear phase; it will be Type I if `numtaps` is odd and
Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be odd if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True);
`nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e the filter is a single band highpass filter);
center of first passband otherwise.
nyq : float
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : 1D ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
Examples
--------
Low-pass from 0 to f::
>>> firwin(numtaps, f)
Use a specific window function::
>>> firwin(numtaps, f, window='nuttall')
High-pass ('stop' from 0 to f)::
>>> firwin(numtaps, f, pass_zero=False)
Band-pass::
>>> firwin(numtaps, [f1, f2], pass_zero=False)
Band-stop::
>>> firwin(numtaps, [f1, f2])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1])::
>>> firwin(numtaps, [f1, f2, f3, f4])
Multi-band (passbands are [f1, f2] and [f3,f4])::
>>> firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
See also
--------
scipy.signal.firwin2
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width)/nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff is even,
# and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0]*pass_zero, cutoff, [1.0]*pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of a passband.
bands = cutoff.reshape(-1,2)
# Build up the coefficients.
alpha = 0.5 * (numtaps-1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
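# Usage sketch (frequencies are fractions of the Nyquist rate with the default nyq=1.0):
# taps_lp = firwin(51, 0.3) # low-pass with cutoff at 0.3*Nyquist
# taps_bp = firwin(101, [0.2, 0.5], pass_zero=False) # band-pass between 0.2 and 0.5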
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Example
-------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError('numtaps must be less than nfreqs, but firwin2 was '
'called with numtaps=%d and nfreqs=%s' % (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps,2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq)-1 and freq[k] == freq[k+1]:
freq[k] = freq[k] - eps
freq[k+1] = freq[k+1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps-1)/2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> bpass = sp.signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = sp.signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
[<matplotlib.lines.Line2D object at 0xf486790>]
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass':1, 'differentiator':2, 'hilbert':3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
| gpl-3.0 |
datapythonista/pandas | pandas/tests/io/formats/test_to_excel.py | 6 | 12536 | """Tests formatting as writer-agnostic ExcelCells
ExcelFormatter is tested implicitly in pandas/tests/io/excel
"""
import string
import pytest
import pandas.util._test_decorators as td
import pandas._testing as tm
from pandas.io.formats.css import CSSWarning
from pandas.io.formats.excel import CSSToExcelConverter
@pytest.mark.parametrize(
"css,expected",
[
# FONT
# - name
("font-family: foo,bar", {"font": {"name": "foo"}}),
('font-family: "foo bar",baz', {"font": {"name": "foo bar"}}),
("font-family: foo,\nbar", {"font": {"name": "foo"}}),
("font-family: foo, bar, baz", {"font": {"name": "foo"}}),
("font-family: bar, foo", {"font": {"name": "bar"}}),
("font-family: 'foo bar', baz", {"font": {"name": "foo bar"}}),
("font-family: 'foo \\'bar', baz", {"font": {"name": "foo 'bar"}}),
('font-family: "foo \\"bar", baz', {"font": {"name": 'foo "bar'}}),
('font-family: "foo ,bar", baz', {"font": {"name": "foo ,bar"}}),
# - family
("font-family: serif", {"font": {"name": "serif", "family": 1}}),
("font-family: Serif", {"font": {"name": "serif", "family": 1}}),
("font-family: roman, serif", {"font": {"name": "roman", "family": 1}}),
("font-family: roman, sans-serif", {"font": {"name": "roman", "family": 2}}),
("font-family: roman, sans serif", {"font": {"name": "roman"}}),
("font-family: roman, sansserif", {"font": {"name": "roman"}}),
("font-family: roman, cursive", {"font": {"name": "roman", "family": 4}}),
("font-family: roman, fantasy", {"font": {"name": "roman", "family": 5}}),
# - size
("font-size: 1em", {"font": {"size": 12}}),
("font-size: xx-small", {"font": {"size": 6}}),
("font-size: x-small", {"font": {"size": 7.5}}),
("font-size: small", {"font": {"size": 9.6}}),
("font-size: medium", {"font": {"size": 12}}),
("font-size: large", {"font": {"size": 13.5}}),
("font-size: x-large", {"font": {"size": 18}}),
("font-size: xx-large", {"font": {"size": 24}}),
("font-size: 50%", {"font": {"size": 6}}),
# - bold
("font-weight: 100", {"font": {"bold": False}}),
("font-weight: 200", {"font": {"bold": False}}),
("font-weight: 300", {"font": {"bold": False}}),
("font-weight: 400", {"font": {"bold": False}}),
("font-weight: normal", {"font": {"bold": False}}),
("font-weight: lighter", {"font": {"bold": False}}),
("font-weight: bold", {"font": {"bold": True}}),
("font-weight: bolder", {"font": {"bold": True}}),
("font-weight: 700", {"font": {"bold": True}}),
("font-weight: 800", {"font": {"bold": True}}),
("font-weight: 900", {"font": {"bold": True}}),
# - italic
("font-style: italic", {"font": {"italic": True}}),
("font-style: oblique", {"font": {"italic": True}}),
# - underline
("text-decoration: underline", {"font": {"underline": "single"}}),
("text-decoration: overline", {}),
("text-decoration: none", {}),
# - strike
("text-decoration: line-through", {"font": {"strike": True}}),
(
"text-decoration: underline line-through",
{"font": {"strike": True, "underline": "single"}},
),
(
"text-decoration: underline; text-decoration: line-through",
{"font": {"strike": True}},
),
# - color
("color: red", {"font": {"color": "FF0000"}}),
("color: #ff0000", {"font": {"color": "FF0000"}}),
("color: #f0a", {"font": {"color": "FF00AA"}}),
# - shadow
("text-shadow: none", {"font": {"shadow": False}}),
("text-shadow: 0px -0em 0px #CCC", {"font": {"shadow": False}}),
("text-shadow: 0px -0em 0px #999", {"font": {"shadow": False}}),
("text-shadow: 0px -0em 0px", {"font": {"shadow": False}}),
("text-shadow: 2px -0em 0px #CCC", {"font": {"shadow": True}}),
("text-shadow: 0px -2em 0px #CCC", {"font": {"shadow": True}}),
("text-shadow: 0px -0em 2px #CCC", {"font": {"shadow": True}}),
("text-shadow: 0px -0em 2px", {"font": {"shadow": True}}),
("text-shadow: 0px -2em", {"font": {"shadow": True}}),
# FILL
# - color, fillType
(
"background-color: red",
{"fill": {"fgColor": "FF0000", "patternType": "solid"}},
),
(
"background-color: #ff0000",
{"fill": {"fgColor": "FF0000", "patternType": "solid"}},
),
(
"background-color: #f0a",
{"fill": {"fgColor": "FF00AA", "patternType": "solid"}},
),
# BORDER
# - style
(
"border-style: solid",
{
"border": {
"top": {"style": "medium"},
"bottom": {"style": "medium"},
"left": {"style": "medium"},
"right": {"style": "medium"},
}
},
),
(
"border-style: solid; border-width: thin",
{
"border": {
"top": {"style": "thin"},
"bottom": {"style": "thin"},
"left": {"style": "thin"},
"right": {"style": "thin"},
}
},
),
(
"border-top-style: solid; border-top-width: thin",
{"border": {"top": {"style": "thin"}}},
),
(
"border-top-style: solid; border-top-width: 1pt",
{"border": {"top": {"style": "thin"}}},
),
("border-top-style: solid", {"border": {"top": {"style": "medium"}}}),
(
"border-top-style: solid; border-top-width: medium",
{"border": {"top": {"style": "medium"}}},
),
(
"border-top-style: solid; border-top-width: 2pt",
{"border": {"top": {"style": "medium"}}},
),
(
"border-top-style: solid; border-top-width: thick",
{"border": {"top": {"style": "thick"}}},
),
(
"border-top-style: solid; border-top-width: 4pt",
{"border": {"top": {"style": "thick"}}},
),
(
"border-top-style: dotted",
{"border": {"top": {"style": "mediumDashDotDot"}}},
),
(
"border-top-style: dotted; border-top-width: thin",
{"border": {"top": {"style": "dotted"}}},
),
("border-top-style: dashed", {"border": {"top": {"style": "mediumDashed"}}}),
(
"border-top-style: dashed; border-top-width: thin",
{"border": {"top": {"style": "dashed"}}},
),
("border-top-style: double", {"border": {"top": {"style": "double"}}}),
# - color
(
"border-style: solid; border-color: #0000ff",
{
"border": {
"top": {"style": "medium", "color": "0000FF"},
"right": {"style": "medium", "color": "0000FF"},
"bottom": {"style": "medium", "color": "0000FF"},
"left": {"style": "medium", "color": "0000FF"},
}
},
),
(
"border-top-style: double; border-top-color: blue",
{"border": {"top": {"style": "double", "color": "0000FF"}}},
),
(
"border-top-style: solid; border-top-color: #06c",
{"border": {"top": {"style": "medium", "color": "0066CC"}}},
),
# ALIGNMENT
# - horizontal
("text-align: center", {"alignment": {"horizontal": "center"}}),
("text-align: left", {"alignment": {"horizontal": "left"}}),
("text-align: right", {"alignment": {"horizontal": "right"}}),
("text-align: justify", {"alignment": {"horizontal": "justify"}}),
# - vertical
("vertical-align: top", {"alignment": {"vertical": "top"}}),
("vertical-align: text-top", {"alignment": {"vertical": "top"}}),
("vertical-align: middle", {"alignment": {"vertical": "center"}}),
("vertical-align: bottom", {"alignment": {"vertical": "bottom"}}),
("vertical-align: text-bottom", {"alignment": {"vertical": "bottom"}}),
# - wrap_text
("white-space: nowrap", {"alignment": {"wrap_text": False}}),
("white-space: pre", {"alignment": {"wrap_text": False}}),
("white-space: pre-line", {"alignment": {"wrap_text": False}}),
("white-space: normal", {"alignment": {"wrap_text": True}}),
# NUMBER FORMAT
("number-format: 0%", {"number_format": {"format_code": "0%"}}),
],
)
def test_css_to_excel(css, expected):
convert = CSSToExcelConverter()
assert expected == convert(css)
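# Direct usage sketch of the converter exercised above, e.g.
# CSSToExcelConverter()("font-weight: bold; color: #f0a") would be expected to yield
# {"font": {"bold": True, "color": "FF00AA"}}, per the parametrized cases.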
def test_css_to_excel_multiple():
convert = CSSToExcelConverter()
actual = convert(
"""
font-weight: bold;
text-decoration: underline;
color: red;
border-width: thin;
text-align: center;
vertical-align: top;
unused: something;
"""
)
assert {
"font": {"bold": True, "underline": "single", "color": "FF0000"},
"border": {
"top": {"style": "thin"},
"right": {"style": "thin"},
"bottom": {"style": "thin"},
"left": {"style": "thin"},
},
"alignment": {"horizontal": "center", "vertical": "top"},
} == actual
@pytest.mark.parametrize(
"css,inherited,expected",
[
("font-weight: bold", "", {"font": {"bold": True}}),
("", "font-weight: bold", {"font": {"bold": True}}),
(
"font-weight: bold",
"font-style: italic",
{"font": {"bold": True, "italic": True}},
),
("font-style: normal", "font-style: italic", {"font": {"italic": False}}),
("font-style: inherit", "", {}),
(
"font-style: normal; font-style: inherit",
"font-style: italic",
{"font": {"italic": True}},
),
],
)
def test_css_to_excel_inherited(css, inherited, expected):
convert = CSSToExcelConverter(inherited)
assert expected == convert(css)
@pytest.mark.parametrize(
"input_color,output_color",
(
list(CSSToExcelConverter.NAMED_COLORS.items())
+ [("#" + rgb, rgb) for rgb in CSSToExcelConverter.NAMED_COLORS.values()]
+ [("#F0F", "FF00FF"), ("#ABC", "AABBCC")]
),
)
def test_css_to_excel_good_colors(input_color, output_color):
# see gh-18392
css = (
f"border-top-color: {input_color}; "
f"border-right-color: {input_color}; "
f"border-bottom-color: {input_color}; "
f"border-left-color: {input_color}; "
f"background-color: {input_color}; "
f"color: {input_color}"
)
expected = {}
expected["fill"] = {"patternType": "solid", "fgColor": output_color}
expected["font"] = {"color": output_color}
expected["border"] = {
k: {"color": output_color} for k in ("top", "right", "bottom", "left")
}
with tm.assert_produces_warning(None):
convert = CSSToExcelConverter()
assert expected == convert(css)
@pytest.mark.parametrize("input_color", [None, "not-a-color"])
def test_css_to_excel_bad_colors(input_color):
# see gh-18392
css = (
f"border-top-color: {input_color}; "
f"border-right-color: {input_color}; "
f"border-bottom-color: {input_color}; "
f"border-left-color: {input_color}; "
f"background-color: {input_color}; "
f"color: {input_color}"
)
expected = {}
if input_color is not None:
expected["fill"] = {"patternType": "solid"}
with tm.assert_produces_warning(CSSWarning):
convert = CSSToExcelConverter()
assert expected == convert(css)
def tests_css_named_colors_valid():
upper_hexs = set(map(str.upper, string.hexdigits))
for color in CSSToExcelConverter.NAMED_COLORS.values():
assert len(color) == 6 and all(c in upper_hexs for c in color)
@td.skip_if_no_mpl
def test_css_named_colors_from_mpl_present():
from matplotlib.colors import CSS4_COLORS as mpl_colors
pd_colors = CSSToExcelConverter.NAMED_COLORS
for name, color in mpl_colors.items():
assert name in pd_colors and pd_colors[name] == color[1:]
| bsd-3-clause |
ktaneishi/deepchem | examples/uv/UV_rf_model.py | 4 | 3422 | """
Script that trains RF model on UV datasets.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import tempfile
import shutil
import deepchem as dc
from sklearn.ensemble import RandomForestRegressor
from UV_datasets import load_uv
###Load data###
np.random.seed(123)
shard_size = 2000
num_trials = 5
print("About to load UV data.")
UV_tasks, datasets, transformers = load_uv(shard_size=shard_size)
train_dataset, valid_dataset, test_dataset = datasets
####################################################### DEBUG
print("np.amin(train_dataset.y)")
print(np.amin(train_dataset.y))
print("np.amax(train_dataset.y)")
print(np.amax(train_dataset.y))
####################################################### DEBUG
print("Number of compounds in train set")
print(len(train_dataset))
print("Number of compounds in validation set")
print(len(valid_dataset))
print("Number of compounds in test set")
print(len(test_dataset))
num_features = train_dataset.get_data_shape()[0]
print("Num features: %d" % num_features)
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
def task_model_builder(model_dir):
sklearn_model = RandomForestRegressor(
n_estimators=100, max_features=int(num_features/3),
min_samples_split=5, n_jobs=-1)
return dc.models.SklearnModel(sklearn_model, model_dir)
all_results = []
for trial in range(num_trials):
print("Starting trial %d" % trial)
model = dc.models.SingletaskToMultitask(UV_tasks, task_model_builder)
print("Training model")
model.fit(train_dataset)
print("Evaluating models")
train_score, train_task_scores = model.evaluate(
train_dataset, [metric], transformers, per_task_metrics=True)
valid_score, valid_task_scores = model.evaluate(
valid_dataset, [metric], transformers, per_task_metrics=True)
test_score, test_task_scores = model.evaluate(
test_dataset, [metric], transformers, per_task_metrics=True)
all_results.append((train_score, train_task_scores,
valid_score, valid_task_scores,
test_score, test_task_scores))
print("----------------------------------------------------------------")
print("Scores for trial %d" % trial)
print("----------------------------------------------------------------")
print("train_task_scores")
print(train_task_scores)
print("Mean Train score")
print(train_score)
print("valid_task_scores")
print(valid_task_scores)
print("Mean Validation score")
print(valid_score)
print("test_task_scores")
print(test_task_scores)
print("Mean Test score")
print(test_score)
print("####################################################################")
for trial in range(num_trials):
(train_score, train_task_scores, valid_score, valid_task_scores,
test_score, test_task_scores) = all_results[trial]
print("----------------------------------------------------------------")
print("Scores for trial %d" % trial)
print("----------------------------------------------------------------")
print("train_task_scores")
print(train_task_scores)
print("Mean Train score")
print(train_score)
print("valid_task_scores")
print(valid_task_scores)
print("Mean Validation score")
print(valid_score)
print("test_task_scores")
print(test_task_scores)
print("Mean Test score")
print(test_score)
| mit |
alexvicegrab/tango_master | organiser/tango_search.py | 1 | 6641 | import logging
import os
import pickle
import string
from unidecode import unidecode
from blessings import Terminal
import numpy as np
import pandas as pd
from .tm_utils import clean_string
TERM = Terminal()
log = logging.getLogger(__name__)
class Singleton(type):
def __init__(cls, *args, **kwargs):
cls.__instance = None
super().__init__(*args, **kwargs)
def __call__(cls, *args, **kwargs):
if cls.__instance is None:
cls.__instance = super().__call__(*args, **kwargs)
return cls.__instance
else:
return cls.__instance
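# Illustrative note (sketch): because of this metaclass, repeated construction returns
# the same cached object per class, e.g. TangoDjAt() is TangoDjAt() evaluates to True.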
class Searcher(metaclass=Singleton):
@staticmethod
def _fix_director(value):
value = value.replace("OT", "").strip() # Remove Orquesta Tipica
value = unidecode(value.lower())
value = value.split("[")[0]
value = value.split("(")[0]
return value.split(", ")[0] # Return last name of Director
@staticmethod
def _fix_genre(value):
value = unidecode(value.lower())
value = value.split("[")[0]
value = value.split("(")[0]
return value.strip()
@staticmethod
def _fix_singers(value):
value = unidecode(value.lower())
value = value.split("&")[0]
return value.strip()
@staticmethod
def _fix_title(value):
value = unidecode(value.lower())
value = value.split("[")[0]
value = value.split("(")[0]
value = value.replace(",", "")
return value.strip()
def parse_search(self, search_dict):
search_dict = dict(search_dict)
for key, value in search_dict.items():
if key == "Director":
search_dict[key] = self._fix_director(value)
if key == "Title":
search_dict[key] = self._fix_title(value)
if key == "Singers":
search_dict[key] = self._fix_singers(value)
if key == "Genre":
search_dict[key] = self._fix_genre(value)
return search_dict
class TangoDjAt(Searcher):
def __init__(self):
self.base_str = "http://www.tango-dj.at/database/index.htm?"
self.key_dict = {
"Title": "title",
"Album": "album",
"Director": "orquestra",
"Singers": "vocalist",
"Genre": "genre",
"Year": "year",
}
def search_url(self, search_dict):
search_url = self.base_str
search_dict = self.parse_search(search_dict)
for key, value in search_dict.items():
if key in self.key_dict:
search_url += self.key_dict[key] + "search=" + value + "&"
search_url += "advsearch=Search"
search_url = search_url.replace(" ", "+")
search_url = unidecode(search_url)
return search_url
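# Illustrative example (hypothetical input): search_url({"Director": "Canaro, Francisco", "Title": "Poema"})
# would build something like
# http://www.tango-dj.at/database/index.htm?orquestrasearch=canaro&titlesearch=poema&advsearch=Search
# (parameter order follows the caller's dict insertion order; spaces become '+').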
@staticmethod
def parse_table(search_url):
table = pd.read_html(search_url)[0]
table.columns = [col.strip() for col in table.columns]
table["Date"] = [
(val if type(val) in [bytes, str] else np.nan) for val in table["Date"]
]
table["Date"] = pd.to_datetime(table["Date"], dayfirst=True, errors="coerce")
return table
class TangoInfo(Searcher):
def __init__(self, save_file="./tango_info.pickle"):
self.base_str = "https://tango.info/"
self.key_dict = {
"Title Clean": "Title",
"Director": "Instrumentalist(s)",
"Singers": "Vocalist(s)",
"Genre": "Genre",
"Year": "Perf. date",
}
self.save_file = save_file
if os.path.isfile(self.save_file):
log.info(TERM.magenta("Load saved Tango.info database"))
self.db = pickle.load(open(self.save_file, "rb"))
else:
self.db = {}
self.get_performances()
self.get_works()
log.info(TERM.green("Saving database to file"))
pickle.dump(self.db, open(self.save_file, "wb"))
@staticmethod
def _clean_title(row):
if row["Alt. title(s)"]:
return row["Title"] + " (" + row["Alt. title(s)"] + ")"
else:
return row["Title"]
@staticmethod
def _composer_string(row):
return f"Comp: <{row['Composer(s)']}> || Lyr: <{row['Lyricist(s)']}>"
def get_performances(self):
log.info(TERM.magenta("Getting Tango.info performances"))
frames = [
pd.read_html(self.base_str + "performances/" + letter)[0]
for letter in string.ascii_uppercase
]
df = pd.concat(frames)
del df["info"]
del df["Track qty"]
del df["Lang."]
df.dropna(
subset=["Instrumentalist(s)"], # ["Perf. date", "Instrumentalist(s)"]
inplace=True,
)
df["Perf. date"] = df["Perf. date"].fillna("")
df.loc[
-df["Perf. date"].str.contains(r"^[\d]{4}-[\d]{2}-[\d]{2}$"), "Perf. date"
] = ""
# df = df[df['Perf. date'].str.contains(r'^[\d]{4}-[\d]{2}-[\d]{2}$')]
df["Vocalist(s)"] = df["Vocalist(s)"].replace(np.nan, "")
df["Title"] = df["Title"].map(clean_string)
self.db["performances"] = df
def get_works(self):
log.info(TERM.magenta("Getting Tango.info works"))
frames = [
pd.read_html(self.base_str + "works/" + letter)[0]
for letter in string.ascii_uppercase
]
df = pd.concat(frames)
del df["Track qty"]
del df["Video qty"]
del df["PD"]
del df["info"]
df.dropna(subset=["Composer(s)"], inplace=True)
df["Lyricist(s)"].replace(np.nan, "", inplace=True)
df["Lyricist(s)"].replace("-", "", inplace=True)
df["Alt. title(s)"].replace(np.nan, "", inplace=True)
df["Genre"].replace(np.nan, "", inplace=True)
# Clean titles
df["Title"] = df.apply(self._clean_title, axis=1)
del df["Alt. title(s)"]
df["Title"] = df["Title"].map(clean_string)
self.db["works"] = df
def search(self, search_dict, field="performances"):
search_dict = self.parse_search(search_dict)
table = self.db[field]
for key, value in search_dict.items():
if key in self.key_dict:
table = table[
table[self.key_dict[key]]
.str.lower()
.map(unidecode)
.str.contains(str(value))
]
if field == "performances":
table["Perf. date"] = pd.to_datetime(table["Perf. date"], dayfirst=True)
return table
| gpl-3.0 |
rbharath/deepchem | deepchem/metrics/__init__.py | 2 | 10120 | """Evaluation metrics."""
import numpy as np
import warnings
from deepchem.utils.save import log
from sklearn.metrics import roc_auc_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc
from scipy.stats import pearsonr
def to_one_hot(y, n_classes=2):
"""Transforms label vector into one-hot encoding.
Turns y into a vector of shape [n_samples, n_classes] (2 by default, i.e. binary labels).
y: np.ndarray
A vector of shape [n_samples, 1]
"""
n_samples = np.shape(y)[0]
y_hot = np.zeros((n_samples, n_classes))
y_hot[np.arange(n_samples), y.astype(np.int64)] = 1
return y_hot
def from_one_hot(y, axis=1):
"""Transorms label vector from one-hot encoding.
y: np.ndarray
A vector of shape [n_samples, num_classes]
"""
return np.argmax(y, axis=axis)
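# Example (sketch): to_one_hot(np.array([0, 1, 1])) gives [[1, 0], [0, 1], [0, 1]],
# and from_one_hot() on that array recovers [0, 1, 1].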
def compute_roc_auc_scores(y, y_pred):
"""Transforms the results dict into roc-auc-scores and prints scores.
Parameters
----------
results: dict
task_types: dict
dict mapping task names to output type. Each output type must be either
"classification" or "regression".
"""
try:
score = roc_auc_score(y, y_pred)
except ValueError:
warnings.warn("ROC AUC score calculation failed.")
score = 0.5
return score
def balanced_accuracy_score(y, y_pred):
"""Computes balanced accuracy score."""
num_positive = float(np.count_nonzero(y))
num_negative = float(len(y) - num_positive)
pos_weight = num_negative / num_positive
weights = np.ones_like(y)
weights[y != 0] = pos_weight
return accuracy_score(y, y_pred, sample_weight=weights)
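# Worked example (sketch): y = [0, 0, 0, 1], y_pred = [0, 0, 1, 1] gives pos_weight = 3,
# weights = [1, 1, 1, 3], and a weighted accuracy of (1 + 1 + 3)/6 = 5/6 ~= 0.83,
# which matches the mean of the per-class recalls (2/3 and 1).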
def pearson_r2_score(y, y_pred):
"""Computes Pearson R^2 (square of Pearson correlation)."""
return pearsonr(y, y_pred)[0]**2
def prc_auc_score(y, y_pred):
"""Compute area under precision-recall curve"""
assert y_pred.shape == y.shape
assert y_pred.shape[1] == 2
precision, recall, _ = precision_recall_curve(y[:, 1], y_pred[:, 1])
return auc(recall, precision)
def rms_score(y_true, y_pred):
"""Computes RMS error."""
return np.sqrt(mean_squared_error(y_true, y_pred))
def mae_score(y_true, y_pred):
"""Computes MAE."""
return mean_absolute_error(y_true, y_pred)
def kappa_score(y_true, y_pred):
"""Calculate Cohen's kappa for classification tasks.
See https://en.wikipedia.org/wiki/Cohen%27s_kappa
Note that this implementation of Cohen's kappa expects binary labels.
Args:
y_true: Numpy array containing true values.
y_pred: Numpy array containing predicted values.
Returns:
kappa: Numpy array containing kappa for each classification task.
Raises:
AssertionError: If y_true and y_pred are not the same size, or if class
labels are not in [0, 1].
"""
assert len(y_true) == len(y_pred), 'Number of examples does not match.'
yt = np.asarray(y_true, dtype=int)
yp = np.asarray(y_pred, dtype=int)
assert np.array_equal(
np.unique(yt), [0,
1]), ('Class labels must be binary: %s' % np.unique(yt))
observed_agreement = np.true_divide(
np.count_nonzero(np.equal(yt, yp)), len(yt))
expected_agreement = np.true_divide(
np.count_nonzero(yt == 1) * np.count_nonzero(yp == 1) +
np.count_nonzero(yt == 0) * np.count_nonzero(yp == 0), len(yt)**2)
kappa = np.true_divide(observed_agreement - expected_agreement,
1.0 - expected_agreement)
return kappa
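# Worked example (sketch): y_true = [0, 0, 1, 1], y_pred = [0, 1, 1, 1] gives
# observed agreement 3/4 = 0.75, expected agreement (2*3 + 2*1)/16 = 0.5,
# and kappa = (0.75 - 0.5)/(1 - 0.5) = 0.5.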
class Metric(object):
"""Wrapper class for computing user-defined metrics."""
def __init__(self,
metric,
task_averager=None,
name=None,
threshold=None,
verbose=True,
mode=None,
compute_energy_metric=False):
"""
Args:
metric: function that takes args y_true, y_pred (in that order) and
computes desired score.
task_averager: If not None, should be a function that averages metrics
across tasks. For example, task_averager=np.mean. If task_averager
is provided, this task will be inherited as a multitask metric.
"""
self.metric = metric
self.task_averager = task_averager
self.is_multitask = (self.task_averager is not None)
if name is None:
if not self.is_multitask:
self.name = self.metric.__name__
else:
self.name = self.task_averager.__name__ + "-" + self.metric.__name__
else:
self.name = name
self.verbose = verbose
self.threshold = threshold
if mode is None:
if self.metric.__name__ in [
"roc_auc_score", "matthews_corrcoef", "recall_score",
"accuracy_score", "kappa_score", "precision_score",
"balanced_accuracy_score", "prc_auc_score"
]:
mode = "classification"
elif self.metric.__name__ in [
"pearson_r2_score", "r2_score", "mean_squared_error",
"mean_absolute_error", "rms_score", "mae_score"
]:
mode = "regression"
else:
raise ValueError("Must specify mode for new metric.")
assert mode in ["classification", "regression"]
self.mode = mode
# The convention used is that the first task is the metric.
# TODO(rbharath, joegomes): This doesn't seem like it should be hard-coded as
# an option in the Metric class. Instead, this should be possible to move into
# user-space as a custom task_averager function.
self.compute_energy_metric = compute_energy_metric
def compute_metric(self,
y_true,
y_pred,
w=None,
n_classes=2,
filter_nans=True,
per_task_metrics=False):
"""Compute a performance metric for each task.
Parameters
----------
y_true: np.ndarray
An np.ndarray containing true values for each task.
y_pred: np.ndarray
An np.ndarray containing predicted values for each task.
w: np.ndarray, optional
An np.ndarray containing weights for each datapoint.
n_classes: int, optional
Number of classes in data for classification tasks.
filter_nans: bool, optional
Remove NaN values in computed metrics
per_task_metrics: bool, optional
If true, return computed metric for each task on multitask dataset.
Returns
-------
A numpy nd.array containing metric values for each task.
"""
if len(y_true.shape) > 1:
n_samples, n_tasks = y_true.shape[0], y_true.shape[1]
else:
n_samples, n_tasks = y_true.shape[0], 1
if self.mode == "classification":
y_pred = np.reshape(y_pred, (n_samples, n_tasks, n_classes))
else:
y_pred = np.reshape(y_pred, (n_samples, n_tasks))
y_true = np.reshape(y_true, (n_samples, n_tasks))
if w is None or len(w) == 0:
w = np.ones_like(y_true)
assert y_true.shape[0] == y_pred.shape[0] == w.shape[0]
computed_metrics = []
for task in range(n_tasks):
y_task = y_true[:, task]
if self.mode == "regression":
y_pred_task = y_pred[:, task]
else:
y_pred_task = y_pred[:, task, :]
w_task = w[:, task]
metric_value = self.compute_singletask_metric(y_task, y_pred_task, w_task)
computed_metrics.append(metric_value)
log("computed_metrics: %s" % str(computed_metrics), self.verbose)
if n_tasks == 1:
computed_metrics = computed_metrics[0]
if not self.is_multitask:
return computed_metrics
else:
if filter_nans:
computed_metrics = np.array(computed_metrics)
computed_metrics = computed_metrics[~np.isnan(computed_metrics)]
if self.compute_energy_metric:
# TODO(rbharath, joegomes): What is this magic number?
force_error = self.task_averager(computed_metrics[1:]) * 4961.47596096
print("Force error (metric: np.mean(%s)): %f kJ/mol/A" % (self.name,
force_error))
return computed_metrics[0]
elif not per_task_metrics:
return self.task_averager(computed_metrics)
else:
return self.task_averager(computed_metrics), computed_metrics
def compute_singletask_metric(self, y_true, y_pred, w):
"""Compute a metric value.
Args:
y_true: A list of arrays containing true values for each task.
y_pred: A list of arrays containing predicted values for each task.
Returns:
Float metric value.
Raises:
NotImplementedError: If metric_str is not in METRICS.
"""
y_true = np.array(np.squeeze(y_true[w != 0]))
y_pred = np.array(np.squeeze(y_pred[w != 0]))
if len(y_true.shape) == 0:
n_samples = 1
else:
n_samples = y_true.shape[0]
# If there are no nonzero examples, metric is ill-defined.
if not y_true.size:
return np.nan
y_true = np.reshape(y_true, (n_samples,))
if self.mode == "classification":
n_classes = y_pred.shape[-1]
# TODO(rbharath): This has been a major source of bugs. Is there a more
# robust characterization of which metrics require class-probs and which
# don't?
if "roc_auc_score" in self.name or "prc_auc_score" in self.name:
y_true = to_one_hot(y_true).astype(int)
y_pred = np.reshape(y_pred, (n_samples, n_classes))
else:
y_true = y_true.astype(int)
# Reshape to handle 1-d edge cases
y_pred = np.reshape(y_pred, (n_samples, n_classes))
y_pred = from_one_hot(y_pred)
else:
y_pred = np.reshape(y_pred, (n_samples,))
if self.threshold is not None:
y_pred = np.greater(y_pred, self.threshold)
try:
metric_value = self.metric(y_true, y_pred)
except (AssertionError, ValueError) as e:
warnings.warn("Error calculating metric %s: %s" % (self.name, e))
metric_value = np.nan
return metric_value
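# Usage sketch: a multitask metric can be built as
# metric = Metric(pearson_r2_score, task_averager=np.mean)
# and evaluated via metric.compute_metric(y_true, y_pred, w).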
| mit |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tests/indexes/test_datetimelike.py | 7 | 52802 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta, time
import numpy as np
from pandas import (DatetimeIndex, Float64Index, Index, Int64Index,
NaT, Period, PeriodIndex, Series, Timedelta,
TimedeltaIndex, date_range, period_range,
timedelta_range, notnull)
import pandas.util.testing as tm
import pandas as pd
from pandas.tslib import Timestamp, OutOfBoundsDatetime
from .common import Base
class DatetimeLike(Base):
def test_shift_identity(self):
idx = self.create_index()
self.assert_index_equal(idx, idx.shift(0))
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
self.assertFalse("length=%s" % len(idx) in str(idx))
self.assertTrue("'foo'" in str(idx))
self.assertTrue(idx.__class__.__name__ in str(idx))
if hasattr(idx, 'tz'):
if idx.tz is not None:
self.assertTrue(idx.tz in str(idx))
if hasattr(idx, 'freq'):
self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
def test_view(self):
super(DatetimeLike, self).test_view()
i = self.create_index()
i_view = i.view('i8')
result = self._holder(i)
tm.assert_index_equal(result, i)
i_view = i.view(self._holder)
result = self._holder(i)
tm.assert_index_equal(result, i_view)
class TestDatetimeIndex(DatetimeLike, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
self.assert_index_equal(result, expected)
def test_construction_with_alt(self):
i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
i2 = DatetimeIndex(i, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
self.assert_index_equal(i2, expected)
# incompat tz/dtype
self.assertRaises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_pickle_compat_construction(self):
pass
def test_construction_index_with_mixed_timezones(self):
# GH 11488
# no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_index_with_mixed_timezones_with_NaT(self):
# GH 11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# different tz coerces the tz-naive timestamp to tz-aware;
# the result is still a DatetimeIndex, not Index(dtype=object)
result = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 05:00'),
Timestamp('2011-01-02 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# a tz mismatch with already tz-aware data raises TypeError/ValueError
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
with tm.assertRaisesRegexp(TypeError, 'data is already tz-aware'):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with tm.assertRaisesRegexp(TypeError, 'data is already tz-aware'):
# passing tz should result in a DatetimeIndex; the mismatch then raises
# TypeError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with tm.assertRaises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_index_equal(result, Index(rng.asi8))
self.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# GH 10442: testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_str_compat(self):
# GH 13149, GH 13209
# verify that we are returning NaT as a string (and not unicode)
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(str)
expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object)
tm.assert_index_equal(result, expected)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4,
name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03',
'2012-01-04'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name',
tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freq='H' and name
result = date_range('1/1/2011', periods=3, freq='H',
name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00',
'2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freq='H' and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00',
'2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_astype_datetime64(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype('datetime64[ns]')
tm.assert_index_equal(result, idx)
self.assertFalse(result is idx)
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
self.assertTrue(result is idx)
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, 'timedelta64')
self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]')
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[D]')
def test_where_other(self):
# other is ndarray or Index
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
for arr in [np.nan, pd.NaT]:
result = i.where(notnull(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_tz(self):
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(idx.get_loc(idx[1].to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
if method is not None:
self.assertEqual(idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')),
1)
self.assertEqual(idx.get_loc('2000-01-01', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with tm.assertRaises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
self.assertEqual(idx.get_loc('2000', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('1999', method='nearest'), 0)
self.assertEqual(idx.get_loc('2001', method='nearest'), 2)
with tm.assertRaises(KeyError):
idx.get_loc('1999', method='pad')
with tm.assertRaises(KeyError):
idx.get_loc('2001', method='backfill')
with tm.assertRaises(KeyError):
idx.get_loc('foobar')
with tm.assertRaises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
self.assertEqual(idx.get_loc('2000-01-02', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-03', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 2))
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12]), check_dtype=False)
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([]), check_dtype=False)
with tm.assertRaises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
with tm.assertRaises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = self.round_trip_pickle(index)
self.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
def test_time_loc(self): # GH8667
from datetime import time
from pandas.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9); since the max value for np.int32 is ~2e9 and
# those machines won't promote np.int32 to np.int64, the product
# overflows.
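# Illustrative arithmetic (following the comment above): with freq='S' the
# stride is 10**9 ns, so periods=1000 implies 1000 * 10**9 = 10**12, which is
# far above np.iinfo(np.int32).max (2147483647); hence int64 is required.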
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
self.assertEqual(len(idx1), periods)
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
self.assertEqual(len(idx2), periods)
def test_intersection(self):
first = self.index
second = self.index[5:]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
self.assertTrue(tm.equalContents(result, second))
third = Index(['a', 'b', 'c'])
result = first.intersection(third)
expected = pd.Index([], dtype=object)
self.assert_index_equal(result, expected)
def test_union(self):
first = self.index[:5]
second = self.index[5:]
everything = self.index
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
self.assertTrue(tm.equalContents(result, everything))
def test_nat(self):
self.assertIs(DatetimeIndex([np.nan])[0], pd.NaT)
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '3D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
def test_fillna_datetime64(self):
# GH 11343
for tz in ['US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'])
exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'])
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# tz mismatch
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 10:00', tz=tz),
pd.Timestamp('2011-01-01 11:00')], dtype=object)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), 'x',
pd.Timestamp('2011-01-01 11:00')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'], tz=tz)
exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], tz=tz)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
pd.Timestamp('2011-01-01 10:00'),
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
'x',
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
def test_difference_of_union(self):
# GH14323: Test taking the union of differences of an Index.
# Difference of DatetimeIndex does not preserve frequency,
# so a differencing operation should not retain the freq field of the
# original index.
i = pd.date_range("20160920", "20160925", freq="D")
a = pd.date_range("20160921", "20160924", freq="D")
expected = pd.DatetimeIndex(["20160920", "20160925"], freq=None)
a_diff = i.difference(a)
tm.assert_index_equal(a_diff, expected)
tm.assert_attr_equal('freq', a_diff, expected)
b = pd.date_range("20160922", "20160925", freq="D")
b_diff = i.difference(b)
expected = pd.DatetimeIndex(["20160920", "20160921"], freq=None)
tm.assert_index_equal(b_diff, expected)
tm.assert_attr_equal('freq', b_diff, expected)
union_of_diff = a_diff.union(b_diff)
expected = pd.DatetimeIndex(["20160920", "20160921", "20160925"],
freq=None)
tm.assert_index_equal(union_of_diff, expected)
tm.assert_attr_equal('freq', union_of_diff, expected)
class TestPeriodIndex(DatetimeLike, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makePeriodIndex(10))
self.setup_indices()
def create_index(self):
return period_range('20130101', periods=5, freq='D')
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='D')]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.Index(np.array(arr), dtype=object))
def test_astype(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
result = idx.astype(object)
expected = Index([Period('2016-05-16', freq='D')] +
[Period(NaT, freq='D')] * 3, dtype='object')
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([16937] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
self.assert_index_equal(result, Index(idx.asi8))
self.assert_numpy_array_equal(result.values, idx.asi8)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
self.assertRaises(ValueError, idx.astype, str)
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, 'timedelta64')
self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]')
def test_shift(self):
# test shift for PeriodIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05', '2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
def test_pickle_compat_construction(self):
pass
def test_get_loc(self):
idx = pd.period_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(
idx.get_loc(idx[1].asfreq('H', how='start'), method), 1)
self.assertEqual(idx.get_loc(idx[1].to_timestamp(), method), 1)
self.assertEqual(
idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
idx = pd.period_range('2000-01-01', periods=5)[::2]
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance='1 day'), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=pd.Timedelta('1D')), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=np.timedelta64(1, 'D')), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=timedelta(1)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc('2000-01-10', method='nearest', tolerance='foo')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(ValueError, msg):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')
with tm.assertRaises(KeyError):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')
def test_where(self):
i = self.create_index()
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = self.create_index()
for arr in [np.nan, pd.NaT]:
result = i.where(notnull(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_get_indexer(self):
idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12',
'2000-01-02T01'], freq='H')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 hour'),
np.array([0, -1, 1], dtype=np.intp))
msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
with self.assertRaisesRegexp(ValueError, msg):
idx.get_indexer(target, 'nearest', tolerance='1 minute')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 day'),
np.array([0, 1, 1], dtype=np.intp))
def test_repeat(self):
# GH10183
idx = pd.period_range('2000-01-01', periods=3, freq='D')
res = idx.repeat(3)
exp = PeriodIndex(idx.values.repeat(3), freq='D')
self.assert_index_equal(res, exp)
self.assertEqual(res.freqstr, 'D')
def test_period_index_indexer(self):
# GH4125
idx = pd.period_range('2002-01', '2003-12', freq='M')
df = pd.DataFrame(pd.np.random.randn(24, 10), index=idx)
self.assert_frame_equal(df, df.ix[idx])
self.assert_frame_equal(df, df.ix[list(idx)])
self.assert_frame_equal(df, df.loc[list(idx)])
self.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
self.assert_frame_equal(df, df.loc[list(idx)])
def test_fillna_period(self):
# GH 11343
idx = pd.PeriodIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'], freq='H')
exp = pd.PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H')
self.assert_index_equal(
idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x',
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'),
pd.Period('2011-01-01', freq='D'),
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
self.assert_index_equal(idx.fillna(pd.Period('2011-01-01', freq='D')),
exp)
def test_no_millisecond_field(self):
with self.assertRaises(AttributeError):
DatetimeIndex.millisecond
with self.assertRaises(AttributeError):
DatetimeIndex([]).millisecond
def test_difference_of_union(self):
# GH14323: Test taking the union of differences of an Index.
# Difference of PeriodIndex MUST preserve frequency, and the ability
# to union the resulting indexes must also be preserved.
i = pd.period_range("20160920", "20160925", freq="D")
a = pd.period_range("20160921", "20160924", freq="D")
expected = pd.PeriodIndex(["20160920", "20160925"], freq='D')
a_diff = i.difference(a)
tm.assert_index_equal(a_diff, expected)
tm.assert_attr_equal('freq', a_diff, expected)
b = pd.period_range("20160922", "20160925", freq="D")
b_diff = i.difference(b)
expected = pd.PeriodIndex(["20160920", "20160921"], freq='D')
tm.assert_index_equal(b_diff, expected)
tm.assert_attr_equal('freq', b_diff, expected)
union_of_diff = a_diff.union(b_diff)
expected = pd.PeriodIndex(["20160920", "20160921", "20160925"],
freq='D')
tm.assert_index_equal(union_of_diff, expected)
tm.assert_attr_equal('freq', union_of_diff, expected)
class TestTimedeltaIndex(DatetimeLike, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeTimedeltaIndex(10))
self.setup_indices()
def create_index(self):
return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
def test_construction_base_constructor(self):
arr = [pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')]
tm.assert_index_equal(pd.Index(arr), pd.TimedeltaIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.TimedeltaIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timedelta('1 days')]
tm.assert_index_equal(pd.Index(arr), pd.TimedeltaIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.TimedeltaIndex(np.array(arr)))
def test_shift(self):
# test shift for TimedeltaIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
self.assert_index_equal(result, expected)
def test_astype(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timedelta('1 days 03:46:40')] + [pd.NaT] * 3,
dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([100000000000000] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = timedelta_range('1 days', periods=10)
result = rng.astype('i8')
self.assert_index_equal(result, Index(rng.asi8))
self.assert_numpy_array_equal(rng.asi8, result.values)
def test_astype_timedelta64(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
result = idx.astype('timedelta64')
expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64')
tm.assert_index_equal(result, expected)
result = idx.astype('timedelta64[ns]')
tm.assert_index_equal(result, idx)
self.assertFalse(result is idx)
result = idx.astype('timedelta64[ns]', copy=False)
tm.assert_index_equal(result, idx)
self.assertTrue(result is idx)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, str)
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[ns]')
def test_get_loc(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(idx.get_loc(idx[1].to_pytimedelta(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
self.assertEqual(
idx.get_loc(idx[1], 'pad', tolerance=pd.Timedelta(0)), 1)
self.assertEqual(
idx.get_loc(idx[1], 'pad', tolerance=np.timedelta64(0, 's')), 1)
self.assertEqual(idx.get_loc(idx[1], 'pad', tolerance=timedelta(0)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc(idx[1], method='nearest', tolerance='foo')
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
self.assertEqual(idx.get_loc('1 day 1 hour', method), loc)
def test_get_indexer(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
res = idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour'))
tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))
def test_numeric_compat(self):
idx = self._holder(np.arange(5, dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5, dtype='int64')
tm.assert_index_equal(result,
self._holder(np.arange(5, dtype='int64') * 5))
result = idx * np.arange(5, dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='float64') + 0.1)
tm.assert_index_equal(result, self._holder(np.arange(
5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1)))
# invalid
self.assertRaises(TypeError, lambda: idx * idx)
self.assertRaises(ValueError, lambda: idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda: idx * np.array([1, 2]))
def test_pickle_compat_construction(self):
pass
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '4H')
for result in [idx / 2, np.divide(idx, 2)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'H')
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '-2H')
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
def test_fillna_timedelta(self):
# GH 11343
idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])
exp = pd.TimedeltaIndex(['1 day', '2 day', '3 day'])
self.assert_index_equal(idx.fillna(pd.Timedelta('2 day')), exp)
exp = pd.TimedeltaIndex(['1 day', '3 hour', '3 day'])
self.assert_index_equal(idx.fillna(pd.Timedelta('3 hour')), exp)
exp = pd.Index(
[pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
def test_difference_of_union(self):
# GH14323: Test taking the union of differences of an Index.
# Difference of TimedeltaIndex does not preserve frequency,
# so a differencing operation should not retain the freq field of the
# original index.
i = pd.timedelta_range("0 days", "5 days", freq="D")
a = pd.timedelta_range("1 days", "4 days", freq="D")
expected = pd.TimedeltaIndex(["0 days", "5 days"], freq=None)
a_diff = i.difference(a)
tm.assert_index_equal(a_diff, expected)
tm.assert_attr_equal('freq', a_diff, expected)
b = pd.timedelta_range("2 days", "5 days", freq="D")
b_diff = i.difference(b)
expected = pd.TimedeltaIndex(["0 days", "1 days"], freq=None)
tm.assert_index_equal(b_diff, expected)
tm.assert_attr_equal('freq', b_diff, expected)
union_of_difference = a_diff.union(b_diff)
expected = pd.TimedeltaIndex(["0 days", "1 days", "5 days"],
freq=None)
tm.assert_index_equal(union_of_difference, expected)
tm.assert_attr_equal('freq', union_of_difference, expected)
| apache-2.0 |
CellModels/tyssue | tyssue/geometry/base_geometry.py | 2 | 5194 | import numpy as np
from ..utils.utils import to_nd
class BaseGeometry:
"""
"""
@staticmethod
def update_all(sheet):
raise NotImplementedError
@staticmethod
def scale(sheet, delta, coords):
""" Scales the coordinates `coords`
by a factor `delta`
"""
sheet.vert_df[coords] = sheet.vert_df[coords] * delta
@staticmethod
def update_dcoords(sheet):
"""
Update the edge vector coordinates on the
`coords` basis (`default_coords` by default).
Modifies the corresponding
columns (i.e. `['dx', 'dy', 'dz']`) in sheet.edge_df.
Also updates the upcasted coordinates of the source and target
vertices
"""
if sheet.settings.get("boundaries") is None:
data = sheet.vert_df[sheet.coords]
srce_pos = sheet.upcast_srce(data)
trgt_pos = sheet.upcast_trgt(data)
sheet.edge_df[["s" + c for c in sheet.coords]] = srce_pos
sheet.edge_df[["t" + c for c in sheet.coords]] = trgt_pos
sheet.edge_df[sheet.dcoords] = trgt_pos - srce_pos
else:
update_periodic_dcoords(sheet)
@staticmethod
def update_ucoords(sheet):
sheet.edge_df[sheet.ucoords] = sheet.edge_df[sheet.dcoords] / to_nd(
sheet.edge_df["length"], sheet.dim
)
@staticmethod
def update_length(sheet):
"""
Updates the edge_df `length` column on the `coords` basis
"""
sheet.edge_df["length"] = np.linalg.norm(sheet.edge_df[sheet.dcoords], axis=1)
@staticmethod
def update_perimeters(sheet):
"""
Updates the perimeter of each face.
"""
sheet.face_df["perimeter"] = sheet.sum_face(sheet.edge_df["length"])
@staticmethod
def update_centroid(sheet):
"""
Updates the face_df `coords` columns as the face's vertices
center of mass. Also updates the edge_df fx, fy, fz columns
with their upcasted values
"""
scoords = ["s" + c for c in sheet.coords]
sheet.face_df[sheet.coords] = sheet.edge_df.groupby("face")[scoords].mean()
face_pos = sheet.upcast_face(sheet.face_df[sheet.coords])
for c in sheet.coords:
sheet.edge_df["f" + c] = face_pos[c]
sheet.edge_df["r" + c] = sheet.edge_df["s" + c] - sheet.edge_df["f" + c]
@staticmethod
def center(eptm):
"""
Translates the epithelium vertices so that the center
of mass is at the center of the coordinate system,
and updates the geometry
"""
eptm.vert_df[eptm.coords] = (
eptm.vert_df[eptm.coords].values
- eptm.vert_df[eptm.coords].mean(axis=0).values[np.newaxis, :]
)
@staticmethod
def dist_to_point(vert_df, point, coords):
"""
Returns the distance of all vertices from point over the
coordinates
Parameters
----------
vert_df: a :class:`pandas.DataFrame` with the points coordinates
in the columns given by the `coords` argument
point: a doublet (in 2D) or triplet (in 3D) giving the reference point
coordinates
coords: list of 2 or 3 strings giving the column names
Returns
-------
distance: a :class:`pandas.Series` with the same length
as the input `vert_df`
"""
return sum(((vert_df[c] - u) ** 2 for c, u in zip(coords, point))) ** 0.5
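# Minimal usage sketch for dist_to_point (illustrative values; assumes pandas
# is available as pd, which this module does not import directly):
#   verts = pd.DataFrame({'x': [0.0, 3.0], 'y': [0.0, 4.0]})
#   BaseGeometry.dist_to_point(verts, (0.0, 0.0), ['x', 'y'])
#   # -> Series with values [0.0, 5.0]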
def update_periodic_dcoords(sheet):
""" Updates the coordinates for periodic boundary conditions.
"""
for u, boundary in sheet.settings["boundaries"].items():
period = boundary[1] - boundary[0]
shift = period * (
-(sheet.vert_df[u] > boundary[1]).astype(float)
+ (sheet.vert_df[u] <= boundary[0]).astype(float)
)
sheet.vert_df[u] = sheet.vert_df[u] + shift
srce_pos = sheet.upcast_srce(sheet.vert_df[sheet.coords])
trgt_pos = sheet.upcast_trgt(sheet.vert_df[sheet.coords])
sheet.edge_df[["s" + u for u in sheet.coords]] = srce_pos
sheet.edge_df[["t" + u for u in sheet.coords]] = trgt_pos
sheet.edge_df[sheet.dcoords] = trgt_pos - srce_pos
for u, boundary in sheet.settings["boundaries"].items():
period = boundary[1] - boundary[0]
center = boundary[1] - period / 2
shift = period * (
-(sheet.edge_df["d" + u] >= period / 2).astype(float)
+ (sheet.edge_df["d" + u] < -period / 2).astype(float)
)
sheet.edge_df["d" + u] = sheet.edge_df["d" + u] + shift
sheet.edge_df[f"at_{u}_boundary"] = shift != 0
sheet.face_df[f"at_{u}_boundary"] = sheet.edge_df.groupby("face")[
f"at_{u}_boundary"
].apply(any)
f_at_boundary = sheet.upcast_face(sheet.face_df[f"at_{u}_boundary"]).astype(int)
period = boundary[1] - boundary[0]
srce_shift = f_at_boundary * (sheet.edge_df["s" + u] < center) * period
trgt_shift = f_at_boundary * (sheet.edge_df["t" + u] < center) * period
sheet.edge_df["s" + u] += srce_shift
sheet.edge_df["t" + u] += trgt_shift
| gpl-2.0 |
alexandrwang/6882project | engel/experiment.py | 1 | 3750 | import numpy as np
class GPTDExperiment(object):
""" An experiment matches up a task with an agent and handles their interactions.
"""
def __init__(self, task, agent):
self.task = task
self.agent = agent
self.stepid = 0
def doInteractions(self, iters=1):
for _ in range(iters):
self._oneInteraction()
return self.stepid
def _oneInteraction(self):
""" Give the observation to the agent, takes its resulting action and returns
it to the task. Then gives the reward to the agent again and returns it.
"""
self.stepid += 1
# observations from tasks are vectors
self.agent.integrateObservation(self.task.getObservation())
self.task.performAction(self.agent.getAction())
reward = self.task.getReward()
newstate = self.task.getObservation()
self.agent.getReward(reward)
return reward
if __name__=="__main__":
from maze import FlagMaze, FlagMazeTask
from module import GPTDModule
from agent import GPTDAgent
# env = Loop()
# task = LoopTask(env)
# env = Chain()
# task = ChainTask(env)
# struct = np.array([[0, 0, 0, 0, 0],
# [0, 1, 1, 0, 0],
# [0, 1, 1, 1, 0],
# [0, 1, 1, 1, 0],
# [0, 1, 0, 1, 0],
# [0, 0, 0, 0, 0]])
# flagPos = [(3, 1)]
# goal = (3, 3)
struct = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 1, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
struct = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0],
[0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
flagPos = [(4,1), (4, 5), (2, 9), (1,11), (7, 7), (10, 1), (7, 9), (4, 3), (5,9), (5, 11), (11,11), (11,12), (12,12), (9,4), (9,5)]
goal = (1, 12)
env = FlagMaze(struct, flagPos, goal)
task = FlagMazeTask(env)
module = GPTDModule(env.start, 0)
agent = GPTDAgent(module, env.start, 0)
exp = GPTDExperiment(task, agent)
reward = 0
xs = []
ys = []
import matplotlib.pyplot as plt
for i in xrange(0):
print i
exp.doInteractions(1)
reward += agent.lastreward
if i%50 == 0:
xs.append(i)
ys.append(reward)
if agent.lastreward > 0:
print "ACTION:",agent.lastaction, "STATE:",agent.laststate, "REWARD:",agent.lastreward
print env.curPos
print "TOTAL REWARD:", reward
print ys
plt.plot(xs, ys)
plt.show()
env.showMaze()
| mit |
mikebenfield/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
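# Note on the choice below: the `preference` parameter controls how many
# exemplars (cluster centers) are selected; more negative values yield fewer
# clusters. -50 is presumably tuned so the three generated blobs are recovered.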
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
gotomypc/scikit-learn | sklearn/preprocessing/tests/test_data.py | 113 | 38432 | import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.utils.validation import DataConversionWarning
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
def test_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
X = np.ones(5)
assert_array_equal(scale(X, with_mean=False), X)
def test_standard_scaler_numerical_stability():
"""Test numerical stability of scaling"""
# np.log(1e-5) is used because its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation runs into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
rng = np.random.RandomState(0)
X = rng.randn(5)
X_orig_copy = X.copy()
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X_orig_copy)
# Test with 1D list
X = [0., 1., 2, 0.4, 1.]
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(X_scaled.min(axis=0), 0.0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.0)
# Constant feature.
X = np.zeros(5)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csr.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.std_, scaler_csr.std_)
assert_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.std_, scaler_csc.std_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csc.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
# check scaling and fit with direct calls on sparse data
# (the sketch after this test shows why centering would densify sparse input)
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
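# Hedged sketch (not part of the original test module): why centering sparse input
# is refused above -- subtracting per-column means turns almost every implicit zero
# into a stored non-zero, silently densifying the matrix. The helper name and the
# tiny example matrix are assumptions made purely for illustration.
def _sparse_centering_densifies_demo():
    import numpy as np
    from scipy import sparse
    X = sparse.csr_matrix(np.eye(4, 5))               # 4 stored values, 16 implicit zeros
    centered = X.toarray() - X.toarray().mean(axis=0)
    # 4 stored values before vs. 16 non-zeros after (only the all-zero column stays zero)
    return X.nnz, np.count_nonzero(centered)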
def test_scale_input_finiteness_validation():
# Check that non-finite inputs raise ValueError
X = [np.nan, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [np.inf, 5, 6, 7, 8]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
def test_robust_scaler_2d_arrays():
"""Test robust scaling of 2d array along first axis"""
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
"""Check RobustScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
# here were calculated to fit the quantiles produced by np.percentile
# using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results! (See the sketch after this test.)
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
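# Hedged sketch (not part of the original test module): reproduces the expected
# third column above with np.percentile, to make the NOTE about quantile methods
# concrete. The helper name and the explicit zero-IQR guard are assumptions.
def _robust_scale_by_hand_demo():
    import numpy as np
    X = np.array([[0., 1., +0.5],
                  [0., 1., -0.1],
                  [0., 1., +1.1]])
    median = np.median(X, axis=0)
    q25, q75 = np.percentile(X, (25, 75), axis=0)
    iqr = q75 - q25
    iqr[iqr == 0.0] = 1.0        # keep zero-variance columns at zero instead of NaN
    return (X - median) / iqr    # third column comes out as [0., -1., +1.]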
def test_maxabs_scaler_zero_variance_features():
"""Check MaxAbsScaler on toy data with zero variance features"""
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# sparse data
X_csr = sparse.csr_matrix(X)
X_trans = scaler.fit_transform(X_csr)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans.A, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv.A)
def test_maxabs_scaler_large_negative_value():
"""Check MaxAbsScaler on toy data with a large negative value"""
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set row number 3 to zero without pruning, i.e. keeping explicit stored zeros
# (can happen in real life; see the sketch after this test)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l1', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
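# Hedged sketch (not part of the original test module): what "zero without pruning"
# means in the normalizer tests above. A CSR matrix may carry explicit zeros in its
# .data array; eliminate_zeros() prunes them. Names below are illustrative only.
def _explicit_zero_demo():
    import numpy as np
    from scipy import sparse
    M = sparse.csr_matrix(np.arange(6.).reshape(2, 3))
    M.data[:] = 0.0              # the stored entries are now explicit zeros
    stored_before = M.nnz        # nnz still counts the stored (zero) entries
    M.eliminate_zeros()          # prune the stored zeros from the structure
    return stored_before, M.nnz  # (5, 0)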
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = Normalizer(norm='max', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse input (see the sketch after this test)
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
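# Hedged sketch (not part of the original test module): why a negative threshold is
# rejected for sparse input above -- every implicit zero would binarize to 1, so the
# result could no longer be stored sparsely. The dense equivalent below is only an
# illustration of that effect; the helper name is an assumption.
def _negative_threshold_demo():
    import numpy as np
    X = np.array([[1, 0, 5], [2, 3, -1]], dtype=float)
    X_bin = (X > -0.5).astype(float)   # dense counterpart of Binarizer(threshold=-0.5)
    return X_bin                       # the single zero entry becomes 1, as in the dense case above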
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space (see the closed-form sketch after this test)
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
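# Hedged sketch (not part of the original test module): the closed form behind the
# fit-time centering checked above. With 1n the n x n matrix filled with 1/n,
# centering X in feature space corresponds to K <- K - 1n.K - K.1n + 1n.K.1n.
# (Transforming a *new* kernel additionally needs the training-time statistics.)
def _center_train_kernel_by_hand(K):
    import numpy as np
    n = K.shape[0]
    one_n = np.full((n, n), 1.0 / n)
    return K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)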
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover the number of values per feature automatically (see the layout sketch after this test)
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# number of values per feature given explicitly as 4 (data values range over 0..3)
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
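# Hedged sketch (not part of the original test module): how the assertions on
# active_features_ and feature_indices_ above come about for X = [[3, 2, 1], [0, 1, 1]],
# assuming the old-style attribute layout (n_values_ = max value + 1 per feature).
# The helper itself is an illustration, not the encoder's actual implementation.
def _one_hot_layout_demo():
    import numpy as np
    X = np.array([[3, 2, 1], [0, 1, 1]])
    n_values = X.max(axis=0) + 1                              # [4, 3, 2]
    feature_indices = np.hstack([[0], np.cumsum(n_values)])   # [0, 4, 7, 9]
    full = np.zeros((X.shape[0], feature_indices[-1]))
    for row, sample in enumerate(X):
        for i, v in enumerate(sample):
            full[row, feature_indices[i] + v] = 1.0           # column offset + value
    active = np.where(full.sum(axis=0) > 0)[0]                # -> [0, 3, 5, 6, 8]
    return feature_indices, active, full[:, active]           # last item matches X_trans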
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]])
)
# Raise an error if handle_unknown is neither 'ignore' nor 'error'.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
| bsd-3-clause |
winklerand/pandas | asv_bench/benchmarks/reindex.py | 7 | 6523 | from .pandas_vb_common import *
from random import shuffle
class Reindexing(object):
goal_time = 0.2
def setup(self):
self.rng = DatetimeIndex(start='1/1/1970', periods=10000, freq='1min')
self.df = DataFrame(np.random.rand(10000, 10), index=self.rng,
columns=range(10))
self.df['foo'] = 'bar'
self.rng2 = Index(self.rng[::2])
self.df2 = DataFrame(index=range(10000),
data=np.random.rand(10000, 30), columns=range(30))
# multi-index
N = 5000
K = 200
level1 = tm.makeStringIndex(N).values.repeat(K)
level2 = np.tile(tm.makeStringIndex(K).values, N)
index = MultiIndex.from_arrays([level1, level2])
self.s1 = Series(np.random.randn((N * K)), index=index)
self.s2 = self.s1[::2]
def time_reindex_dates(self):
self.df.reindex(self.rng2)
def time_reindex_columns(self):
self.df2.reindex(columns=self.df.columns[1:5])
def time_reindex_multiindex(self):
self.s1.reindex(self.s2.index)
#----------------------------------------------------------------------
# Pad / backfill (see the illustrative sketch after the FillMethod class)
class FillMethod(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=100000, freq='1min')
self.ts = Series(np.random.randn(len(self.rng)), index=self.rng)
self.ts2 = self.ts[::2]
self.ts3 = self.ts2.reindex(self.ts.index)
self.ts4 = self.ts3.astype('float32')
def pad(self, source_series, target_index):
try:
source_series.reindex(target_index, method='pad')
except:
source_series.reindex(target_index, fillMethod='pad')
def backfill(self, source_series, target_index):
try:
source_series.reindex(target_index, method='backfill')
except:
source_series.reindex(target_index, fillMethod='backfill')
def time_backfill_dates(self):
self.backfill(self.ts2, self.ts.index)
def time_pad_daterange(self):
self.pad(self.ts2, self.ts.index)
def time_backfill(self):
self.ts3.fillna(method='backfill')
def time_backfill_float32(self):
self.ts4.fillna(method='backfill')
def time_pad(self):
self.ts3.fillna(method='pad')
def time_pad_float32(self):
self.ts4.fillna(method='pad')
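# Hedged sketch (not part of the original benchmark suite): a minimal illustration of
# the pad/backfill reindexing timed above; the try/except in FillMethod apparently only
# exists to support a very old 'fillMethod' keyword spelling. The series layout below
# is an assumption chosen for illustration.
def _pad_backfill_demo():
    import numpy as np
    import pandas as pd
    idx = pd.date_range('1/1/2000', periods=6, freq='1min')
    ts = pd.Series(np.arange(6.0), index=idx)[::2]    # keep every other observation
    padded = ts.reindex(idx, method='pad')            # forward-fill the gaps
    backfilled = ts.reindex(idx, method='backfill')   # backward-fill the gaps
    return padded, backfilled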
#----------------------------------------------------------------------
# align on level
class LevelAlign(object):
goal_time = 0.2
def setup(self):
self.index = MultiIndex(
levels=[np.arange(10), np.arange(100), np.arange(100)],
labels=[np.arange(10).repeat(10000),
np.tile(np.arange(100).repeat(100), 10),
np.tile(np.tile(np.arange(100), 100), 10)])
random.shuffle(self.index.values)
self.df = DataFrame(np.random.randn(len(self.index), 4),
index=self.index)
self.df_level = DataFrame(np.random.randn(100, 4),
index=self.index.levels[1])
def time_align_level(self):
self.df.align(self.df_level, level=1, copy=False)
def time_reindex_level(self):
self.df_level.reindex(self.df.index, level=1)
#----------------------------------------------------------------------
# drop_duplicates
class Duplicates(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.K = 10
self.key1 = tm.makeStringIndex(self.N).values.repeat(self.K)
self.key2 = tm.makeStringIndex(self.N).values.repeat(self.K)
self.df = DataFrame({'key1': self.key1, 'key2': self.key2,
'value': np.random.randn((self.N * self.K)),})
self.col_array_list = list(self.df.values.T)
self.df2 = self.df.copy()
self.df2.ix[:10000, :] = np.nan
self.s = Series(np.random.randint(0, 1000, size=10000))
self.s2 = Series(np.tile(tm.makeStringIndex(1000).values, 10))
np.random.seed(1234)
self.N = 1000000
self.K = 10000
self.key1 = np.random.randint(0, self.K, size=self.N)
self.df_int = DataFrame({'key1': self.key1})
self.df_bool = DataFrame({i: np.random.randint(0, 2, size=self.K,
dtype=bool)
for i in range(10)})
def time_frame_drop_dups(self):
self.df.drop_duplicates(['key1', 'key2'])
def time_frame_drop_dups_inplace(self):
self.df.drop_duplicates(['key1', 'key2'], inplace=True)
def time_frame_drop_dups_na(self):
self.df2.drop_duplicates(['key1', 'key2'])
def time_frame_drop_dups_na_inplace(self):
self.df2.drop_duplicates(['key1', 'key2'], inplace=True)
def time_series_drop_dups_int(self):
self.s.drop_duplicates()
def time_series_drop_dups_string(self):
self.s2.drop_duplicates()
def time_frame_drop_dups_int(self):
self.df_int.drop_duplicates()
def time_frame_drop_dups_bool(self):
self.df_bool.drop_duplicates()
#----------------------------------------------------------------------
# blog "pandas escaped the zoo"
class Align(object):
goal_time = 0.2
def setup(self):
n = 50000
indices = tm.makeStringIndex(n)
subsample_size = 40000
def sample(values, k):
sampler = np.arange(len(values))
shuffle(sampler)
return values.take(sampler[:k])
self.x = Series(np.random.randn(50000), indices)
self.y = Series(np.random.randn(subsample_size),
index=sample(indices, subsample_size))
def time_align_series_irregular_string(self):
(self.x + self.y)
class LibFastZip(object):
goal_time = 0.2
def setup(self):
self.N = 10000
self.K = 10
self.key1 = tm.makeStringIndex(self.N).values.repeat(self.K)
self.key2 = tm.makeStringIndex(self.N).values.repeat(self.K)
self.df = DataFrame({'key1': self.key1, 'key2': self.key2, 'value': np.random.randn((self.N * self.K)), })
self.col_array_list = list(self.df.values.T)
self.df2 = self.df.copy()
self.df2.ix[:10000, :] = np.nan
self.col_array_list2 = list(self.df2.values.T)
def time_lib_fast_zip(self):
lib.fast_zip(self.col_array_list)
def time_lib_fast_zip_fillna(self):
lib.fast_zip_fillna(self.col_array_list2)
| bsd-3-clause |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/numpy-1.9.2/doc/source/conf.py | 33 | 9781 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.0.1":
raise RuntimeError("Sphinx 1.0.1 or newer required")
needs_sphinx = '1.0'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.autosummary',
'matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# General substitutions.
project = 'NumPy'
copyright = '2008-2009, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
import numpy
# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
# -- see the illustrative sketch after the version printout below
version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__)
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = numpy.__version__
print("%s %s" % (version, release))
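# Hedged sketch (not part of the original conf.py): what the two substitutions above
# do to a few representative version strings; the example strings are assumptions.
def _short_version_demo():
    import re
    out = []
    for v in ("1.9.2", "1.10.0.dev0+1a2b3c4", "1.9.0rc1"):
        short = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', v)
        short = re.sub(r'(\.dev\d+).*?$', r'\1', short)
        out.append(short)
    return out  # ['1.9', '1.10.dev0', '1.9rc1']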
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if not os.path.isdir(themedir):
raise RuntimeError("Get the scipy-sphinx-theme first, "
"via git submodule init && git submodule update")
html_theme = 'scipy'
html_theme_path = [themedir]
if 'scipyorg' in tags:
# Build for the scipy.org website
html_theme_options = {
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
"rootlinks": [("http://scipy.org/", "Scipy.org"),
("http://docs.scipy.org/", "Docs")]
}
else:
# Default build
html_theme_options = {
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
"rootlinks": []
}
html_sidebars = {'index': 'indexsidebar.html'}
html_additional_pages = {
'index': 'indexcontent.html',
}
html_title = "%s v%s Manual" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_use_modindex = True
html_copy_source = False
html_domain_indices = False
html_file_suffix = '.html'
htmlhelp_basename = 'numpy'
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the NumPy community'
latex_documents = [
('reference/index', 'numpy-ref.tex', 'NumPy Reference',
_stdauthor, 'manual'),
('user/index', 'numpy-user.tex', 'NumPy User Guide',
_stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Texinfo output
# -----------------------------------------------------------------------------
texinfo_documents = [
("contents", 'numpy', 'Numpy Documentation', _stdauthor, 'Numpy',
"NumPy: array processing for numbers, strings, records, and objects.",
'Programming',
1),
]
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {'http://docs.python.org/dev': None}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Make numpydoc to generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
import glob
autosummary_generate = glob.glob("reference/*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [('png', 100), 'pdf']
import math
phi = (math.sqrt(5) + 1)/2
plot_rcparams = {
'font.size': 8,
'axes.titlesize': 8,
'axes.labelsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 8,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print("NOTE: linkcode extension not found -- no links to source generated")
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.findsource(obj)
except:
lineno = None
if lineno:
linespec = "#L%d" % (lineno + 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(numpy.__file__))
if 'dev' in numpy.__version__:
return "http://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
fn, linespec)
else:
return "http://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
numpy.__version__, fn, linespec)
| mit |
cseyda/Master-thesis | code/Python/mongo.py | 1 | 32509 | #!/usr/bin/python
# -*- coding: utf8 -*-
import os.path
from scipy import arange
import sys
from interface import *
ger_small_file="/home/seydanator/Desktop/SVN/egtdata/tweets.germany.gt"
# 2MB
# 57.554
ger_small= "a56c48c3df72452177dce28efd790ddc"
ger_small_d = "a56c48c3df72452177dce28efd790ddcdisplace"
ger_file = "/home/seydanator/Desktop/SVN/egtdata/complete_2008-2011.germany.egt"
#140MB
# 1.254.298
ger = "90bda4cc98289c9d3c231127f8253189"
usa_small_file="/home/seydanator/Desktop/SVN/egtdata/complete_2008-2011.usa.egt"
#719MB
# 5.976.723
usa_small= "3f87f7a7aa3d8ba083407ff956f1ada1"
usa_small_1M= "3f87f7a7aa3d8ba083407ff956f1ada1_1M"
#1.086.678
usa_file = "/home/seydanator/Desktop/SVN/egtdata/tweets_110716_110815_text.us.egt"
#1200MB
# 14.531.622
usa = "32f974a4edb4eb8f1e2aa366fa1259b1"
# 279.889.256
# 16.777.216
dir_col = {
ger_small : "GER_SMALL",
ger_small_d : "GER_SMALL_D",
ger : "GER",
usa_small : "USA_SMALL",
usa_small_1M : "USA_SMALL_1M",
usa : "USA",
"blobs" : "",
"noisy_circles":"",
"noisy_moons":"",
"random":""
}
#
# Max 1.333.000 points because of the 16MB per-document (BSON) limit in MongoDB
#
#insert_file(ger_small_file)
#insert_file(ger_file)
#insert_file(usa_small_file)
#insert_file(usa_file)
#new_collection(ger_small, ger_small+"displace", displace=True)
#triang_combiner(ger_small)
#triang_combiner(ger_small+"displace")
#triang_combiner(ger)
#triang_combiner(usa_small)
#triang_combiner(usa)
#sys.exit()
#new_collection(usa_small, usa_small+"_1M", bbox=[0,0,0,0], sampling=(2,9), limit=0, count=False, combine=True, displace=False)
#triang_combiner(usa_small+"_1M")
#sys.exit()
#same_point("ger_test200k")
# .207
#200.000
#same_point("ger_test100k")
# .609
#114.028
#same_point("ger_test2")
# 1.457
#418.100
#ger
#uniques: 3.229
#points: 1.254.298
#usa_small
#13757
#5976723
#
#frequency_map(ger_small, borders_by_collection(ger_small), legend=True)
#frequency_map(ger, borders_by_collection(ger), legend=True)
#frequency_map(usa_small_1M, borders_by_collection(usa_small_1M), legend=True)
#edge_pic(ger_small, cm="winter")
#edge_pic(ger, cm="winter")
#edge_pic(usa_small_1M, cm="winter")
#sys.exit()
#cluster_histogram([home+dir_col[col]+"/"+efile])
#from datetime import datetime
#startTime = datetime.now()
#print eval_clustering(col, home+dir_col[col]+"/"+efile, 950, 8000)
#print eval_clustering(col, home+dir_col[col]+"/"+efile, 10, 1600, True)
#print eval_clustering(col, home+dir_col[col]+"/"+efile, 10, 1600, False)
#eval_clustering(col, home+dir_col[col]+"/"+efile, 980, 8000)
#eval_clustering(col, home+dir_col[col]+"/"+efile, 990, 8000)
#delta = datetime.now()-startTime
#if (delta.seconds > 120):
# print delta.seconds/float(60), "minutes"
#else:
# print delta.seconds, "seconds"
#D
#DB
#C
#SW
#Geo
#Jac
home = "/home/seydanator/Desktop/SVN/code/"
#different clusters
import matplotlib as mpl
mpl.rcParams['font.family'] = "Liberation Serif"
import matplotlib.pyplot as plt
import numpy as np
def get_scores(make, col, efile):
stats = {'Geo': {'SW': [], 'C': [], 'DB': [], 'D': []}, 'JBi': {'SW': [], 'C': [], 'DB': [], 'D': []}}
for m in make:
skip = 0
count_limit = 1000000000
count = count_limit
count_ = 0
equal = False
#while(count >= count_limit):
# skip += 500
# count_ = int(eval_clustering(col, home+dir_col[col]+"/"+efile, m, skip, True)["count"]["count"])
# if count == count_:
# equal = True
# break
# count = count_
count = int(eval_clustering(col, home+dir_col[col]+"/"+efile, m, skip, True)["count"]["count"])
if count > count_limit:
equal = True
if not equal:
stat = eval_clustering(col, home+dir_col[col]+"/"+efile, m, skip, False)
for typ, val in stat.iteritems():
for qm, score in val.iteritems():
stats[typ][qm].append(float(score))
return stats
def eval_comp(col, files, make = [10, 20, 30, 40, 50]):
#f, (geo_ax, jac_ax) = plt.subplots(2, sharex=True, sharey=True)
##f, geo_ax= plt.subplots()
#plt.xlabel('Quality Measure')
#geo_ax.set_ylabel('Location Scores')
#jac_ax.set_ylabel('Jaccard Scores')
#
#geo_ax.grid()
#jac_ax.grid()
#n_groups = 4
#index = np.arange(n_groups)
#bar_width = 1/float(len(files)+1)
#opacity = 0.4
#error_config = {'ecolor': '0.3'}
##color=["b","g","r","y"]
#cm = plt.cm.get_cmap("rainbow")#cubehelix")
#
j_means = []
j_std = []
g_means = []
g_std = []
for i, efile in enumerate(files):
j_means.append([])
j_std.append([])
g_means.append([])
g_std.append([])
#break
stats = get_scores(make, col, efile)
#print stats
for qm, scores in stats["Geo"].iteritems():
mean = np.ma.masked_invalid(scores).mean()
if not np.ma.count_masked(mean):
g_means[i].append(mean)
g_std[i].append(np.ma.masked_invalid(scores).std())
else:
g_means[i].append(0.0)
g_std[i].append(0.0)
for qm, scores in stats["JBi"].iteritems():
mean = np.ma.masked_invalid(scores).mean()
if not np.ma.count_masked(mean):
j_means[i].append(mean)
j_std[i].append(np.ma.masked_invalid(scores).std())
else:
j_means[i].append(0.0)
j_std[i].append(0.0)
print j_means
print j_std
print g_means
print g_std
print "location"
for i, efile in enumerate(files):
print "%s & %.2e & %.2e & %.2e & %.2e\\\\" % (efile, g_means[i][0], g_means[i][1], g_means[i][2], g_means[i][3])
print "text"
for i, efile in enumerate(files):
print "%s & %.2e & %.2e & %.2e & %.2e\\\\" % (efile, j_means[i][0], j_means[i][1], j_means[i][2], j_means[i][3])
#DB C SW D
#efile & 1.35e-02 & 3.18e-09 & 9.87e-01& 3.13e+00
#j_means=[[0.0, 0.62174666666666667, -0.053039099999999999, 0.0], [0.0, 0.62304533333333334, -0.057713966666666672, 0.0], [0.0, 0.728271, -0.118867, 0.0], [0.0, 0.50574433333333335, -0.090671500000000002, 0.0]]
#j_std=[[0.0, 0.017993807552105858, 0.011821196722272524, 0.0], [0.0, 0.019107415773870481, 0.0096527306023851209, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.10828102195162771, 0.011682913534730969, 0.0]]
#g_means=[[0.13513900000000001, 0.00911818, 0.32162033333333334, 0.00073883399999999995], [0.148448, 0.0085566400000000008, 0.30092066666666667, 0.00068666933333333326], [37.1616, 0.00557852, 0.43602200000000002, 3.2653099999999999e-12], [3.7070700000000003, 0.004697743333333333, 0.59381033333333333, 6.491913333333332e-12]]
#g_std=[[0.001865104286628497, 0.00086073188268279328, 0.07201798690759291, 0.00019194617108449967], [0.0069335815179939024, 0.00067720244196251987, 0.073248796760690155, 1.6565789835950764e-05], [0.0, 0.0, 0.0, 0.0], [1.4463635771824457, 0.0050081985185915128, 0.018190218916280868, 2.6178531075461219e-12]]
# copy lists in order to normalize to 0...1 and show also the original values
#import copy
#j_plot = copy.deepcopy(j_means)
#g_plot = copy.deepcopy(g_means)
#print g_means, g_plot
#normalizing DB values
#g_db = 0.0
#g_d = 0.0
#for line in g_means:
# g_db = max(g_db, line[0])
# g_d = max(g_d, line[3])
#
#j_db = 0.0
#j_d = 0.0
#for line in j_means:
# j_db = max(j_db, line[0])
# j_d = max(j_d, line[3])
#
#if g_db != 0.0:
# for line in g_plot:
# line[0]/=g_db
#if j_db != 0.0:
# for line in j_plot:
# line[0]/=j_db
#
#if g_d != 0.0:
# for line in g_plot:
# line[3]/=g_d
#if j_d != 0.0:
# for line in j_plot:
# line[3]/=j_d
#print g_means, g_plot
#for i, efile in enumerate(files):
# rects1 = geo_ax.bar(index+i*bar_width, g_plot[i], bar_width,
# alpha=opacity,
# color=cm(float(i)/len(files)),
# #yerr=g_std[i],
# error_kw=error_config,
# label=efile)
# for j, rect in enumerate(rects1):
# height = rect.get_height()
# geo_ax.text(rect.get_x()+rect.get_width()/2., 0.1, '%.2e'%g_means[i][j], ha='center', va='bottom', rotation='vertical')#1.05*height
#
# rects2 = jac_ax.bar(index+i*bar_width, j_plot[i], bar_width,
# alpha=opacity,
# color=cm(float(i)/len(files)),
# #yerr=j_std[i],
# error_kw=error_config,
# label=efile)
# for j, rect in enumerate(rects2):
# height = rect.get_height()
# jac_ax.text(rect.get_x()+rect.get_width()/2., 0.1, '%.2e'%j_means[i][j], ha='center', va='bottom', rotation='vertical')#1.05*height
#
#plt.xticks(index + (len(files)*bar_width) / 2.0, [u'0 ≤ DB ≤ ∞\nminimize', u'0 ≤ C ≤ 1\nminimize', u'-1 ≤ SW ≤ 1\nmaximize', u'0 ≤ D ≤ ∞\nmaximize'])
#plt.legend(ncol=2, loc=8, mode="expand", bbox_to_anchor=(0., -0.75, 1., .02))
#plt.tight_layout()
##plt.show()
#plt.savefig("compare.pdf", bbox_inches='tight', pad_inches=0)
#plt.close()
# GER Details
#inttt = dir_col[ger]+"/RW_0.986000_4_2.000000_0.100000_35000"
#outtt = inttt + "_testttt.pdf"
#bor = [52.3,52.6, 12.9,13.8]
#plot(ger, clusters_in_borders(ger, bor, inttt), show=False, output=outtt, cluster_count=27, borders=None, scatter=False, colored=True, shape_size=1000, legend=False)
#inttt = dir_col[ger]+"/COM_4_1.0_1000_0.1_b"
#outtt = inttt + "_testttt.pdf"
#bor = [52.13,52.683043, 12.6,14.0]
##min_lat, max_lat, min_lon, max_lon = borders
#plot(ger, parse_clustering(inttt), show=False, output=outtt, cluster_count=27, borders=bor, scatter=True, colored=True, shape_size=2000, legend=False)
# GER SMALL Details
col = ger_small
todo = ["/RW_0.969400_4_1.000000_0.100000_40000", "/COM_4_1.0_300_0.1_b"]
#COM_4_1.0_300_0.1_b
#clusters 888
#clustered 52418
#unclustered 5136
#
#RW_0.969400_4_1.000000_0.100000_40000
#clusters 2082
#clustered 43175
#unclustered 14379
for ttt in todo:
inttt = dir_col[col]+ttt
for key in ["acakfilm"]:#["berlin", "brandenburg", "hamburg", "potsdam", "hessen", "stuttgart", "frankfurt"]:
print col, key
frequency_map(col, borders_by_collection(col), clustering=parse_clustering(inttt), keywords=[key], legend=True, out=ttt[1:]+"_"+key, bin_scale=4)
sys.exit()
# USA Details
col = usa_small_1M
todo = ["/RW_0.951000_4_0.100000_0.100000_300000", "/COM_4_1.0_7000_0.5_b"]
for ttt in todo:
inttt = dir_col[col]+ttt
for key in ["nature", "desert", "coast"]:
print col, key
frequency_map(col, borders_by_collection(col), clustering=parse_clustering(inttt), keywords=[key], legend=True, out=ttt[1:]+"_"+key)
continue
outtt = dir_col[col]+"_"+key+"_"+ttt[:3]
#bor = [36.8, 43.8, -79.0,-69.5]#general, shapesize = 20000
#bor = [38.706946, 39.049052,-77.311707,-76.784363]#washington, shapesize = 500
#bor = [40.229218, 40.984045,-74.48822,-73.675232]#new york, ss=500
plot(col, parse_clustering(inttt), show=False, output=outtt, cluster_count=None, borders=borders_by_collection(col), scatter=False, colored=True, shape_size=50000, legend=False, keywords = [key])
#plot(col, clusters_in_borders(col, bor, inttt, 5000), show=False, output=outtt, cluster_count=27, borders=bor, scatter=False, colored=True, shape_size=10000, legend=False)
# GER Details
col = ger
todo = ["/RW_0.972000_4_1.000000_0.100000_35000", "/COM_4_1.0_1000_0.1_b"]
#COM_4_1.0_1000_0.1_b
#clusters 200
#clustered 1253613
#unclustered 685
#
#RW_0.972000_4_1.000000_0.100000_35000
#clusters 194
#clustered 955138
#unclustered 299160
for ttt in todo:
inttt = dir_col[col]+ttt
for key in ["schluchsee", "sanssouci", "oster", "berlin", "hamburg", "autobahn"]:
print col, key
frequency_map(col, borders_by_collection(col), clustering=parse_clustering(inttt), keywords=[key], legend=True, out=ttt[1:]+"_"+key, bin_scale=4)
continue
""""""
ger_files = [\
#"COM_4_1.0_1000_0.9_b", \
#"COM_4_1.0_1000_0.1_b", \
#"COM_4_1.0_500_0.5_b"]
#"RW_0.951000_4_0.100000_0.100000_35000",\
#"RW_0.955000_4_0.100000_0.100000_35000",\
#"RW_0.965000_4_0.100000_0.100000_35000",\
#"RW_0.972000_4_1.000000_0.100000_35000",\
#"RW_0.976000_4_1.000000_0.100000_35000",\
#"RW_0.981000_4_1.000000_0.100000_35000"
#"RW_0.983000_4_2.000000_0.100000_35000",\
#"RW_0.986000_4_2.000000_0.100000_35000",\
#"RW_0.992000_4_2.000000_0.100000_35000",\
#"random", "random2"
]
"""
location min min max max
COM_4_1.0_1000_0.9_b & 2.88e-02 & 3.76e-06 & 9.82e-01 & 7.19e-02\\
COM_4_1.0_1000_0.1_b & 1.20e-01 & 2.52e-05 & 9.05e-01 & 2.48e-03\\
COM_4_1.0_500_0.5_b & 1.71e-02 & 7.98e-06 & 9.76e-01 & 4.44e-02\\
RW_0.951000_4_0.100000_0.100000_35000 & 3.35e-01 & 3.27e-05 & 8.77e-01 & 5.65e-08\\
RW_0.955000_4_0.100000_0.100000_35000 & 6.09e-01 & 3.21e-05 & 8.89e-01 & 2.05e-09\\
RW_0.965000_4_0.100000_0.100000_35000 & 9.78e-01 & 8.62e-05 & 7.51e-01 & 3.29e-10\\
RW_0.972000_4_1.000000_0.100000_35000 & 2.20e-01 & 3.52e-05 & 8.93e-01 & 5.45e-08\\
RW_0.976000_4_1.000000_0.100000_35000 & 1.97e+03 & 6.84e-05 & 8.39e-01 & 5.45e-08\\
RW_0.981000_4_1.000000_0.100000_35000 & 7.22e+01 & 2.06e-04 & 7.59e-01 & 2.95e-09\\
RW_0.983000_4_2.000000_0.100000_35000 & 2.08e+00 & 1.26e-04 & 7.49e-01 & 7.51e-09\\
RW_0.986000_4_2.000000_0.100000_35000 & 2.43e+01 & 3.54e-04 & 7.05e-01 & 1.61e-09\\
RW_0.992000_4_2.000000_0.100000_35000 & 1.59e+04 & 1.07e-03 & 5.73e-01 & 4.35e-10\\
random & 0.00e+00 & 0.00e+00 & 0.00e+00 & 0.00e+00\\
random2 & 1.07e+08 & 2.64e-01 & -1.01e-01 & 0.00e+00\\
text
COM_4_1.0_1000_0.9_b & 1.29e+00 & 1.95e-01 & 5.34e-01 & 0.00e+00\\
COM_4_1.0_1000_0.1_b & 1.17e+00 & 1.90e-01 & 5.60e-01 & 0.00e+00\\
COM_4_1.0_500_0.5_b & 1.22e+00 & 1.94e-01 & 5.47e-01 & 0.00e+00\\
RW_0.951000_4_0.100000_0.100000_35000 & 1.18e+00 & 2.84e-01 & 3.19e-01 & 0.00e+00\\
RW_0.955000_4_0.100000_0.100000_35000 & 1.30e+00 & 2.77e-01 & 4.04e-01 & 0.00e+00\\
RW_0.965000_4_0.100000_0.100000_35000 & 1.59e+00 & 2.91e-01 & 3.86e-01 & 0.00e+00\\
RW_0.972000_4_1.000000_0.100000_35000 & 1.13e+00 & 2.78e-01 & 4.23e-01 & 0.00e+00\\
RW_0.976000_4_1.000000_0.100000_35000 & 1.25e+00 & 2.83e-01 & 3.41e-01 & 0.00e+00\\
RW_0.981000_4_1.000000_0.100000_35000 & 1.62e+00 & 3.02e-01 & 3.54e-01 & 0.00e+00\\
RW_0.983000_4_2.000000_0.100000_35000 & 1.47e+00 & 3.37e-01 & 2.93e-01 & 0.00e+00\\
RW_0.986000_4_2.000000_0.100000_35000 & 1.56e+00 & 3.52e-01 & 2.54e-01 & 0.00e+00\\
RW_0.992000_4_2.000000_0.100000_35000 & 1.42e+00 & 3.37e-01 & 3.10e-01 & 0.00e+00\\
random & 0.00e+00 & 0.00e+00 & 0.00e+00 & 0.00e+00\\
random2 & 1.57e+00 & 8.68e-01 & -3.34e-02 & 0.00e+00\\
"""
ger_small_files = [\
#"COM_4_1.0_300_0.5_b", \
#"COM_4_1.0_300_0.1_b", \
#"COM_4_1.0_200_0.5_b", \
#"COM_4_1.0_200_0.1_b"]
#"RW_0.960000_4_0.100000_0.100000_40000",\
#"RW_0.964000_4_0.100000_0.100000_40000",\
#"RW_0.965000_4_0.100000_0.100000_40000",\
#"RW_0.970000_4_1.000000_0.100000_40000",\
#"RW_0.976000_4_2.000000_0.100000_40000"
#"RW_0.969400_4_1.000000_0.100000_40000",
#"RW_0.969600_4_1.000000_0.100000_40000",
#"RW_0.969900_4_1.000000_0.100000_40000",
#"random", "random2"
]
"""
#DB C SW D
location min min max max
COM_4_1.0_300_0.5_b & 1.58e-01 & 2.72e-04 & 7.02e-01 & 3.54e-04\\
COM_4_1.0_300_0.1_b & 2.25e-01 & 1.17e-04 & 7.14e-01 & 2.77e-04\\
COM_4_1.0_200_0.5_b & 1.37e-01 & 6.21e-05 & 7.51e-01 & 1.20e-03\\
COM_4_1.0_200_0.1_b & 1.63e-01 & 5.07e-05 & 7.57e-01 & 6.95e-04\\
RW_0.960000_4_0.100000_0.100000_40000 & 1.14e+00 & 1.10e-04 & 7.64e-01 & 7.59e-10\\
RW_0.964000_4_0.100000_0.100000_40000 & 2.29e+00 & 3.42e-04 & 5.09e-01 & 7.99e-12\\
RW_0.965000_4_0.100000_0.100000_40000 & 2.48e+01 & 6.57e-04 & 4.20e-01 & 1.15e-10\\
RW_0.970000_4_1.000000_0.100000_40000 & 1.20e+00 & 1.78e-04 & 6.86e-01 & 3.60e-09\\
RW_0.976000_4_2.000000_0.100000_40000 & 2.60e+00 & 6.34e-04 & 4.72e-01 & 2.21e-10\\
RW_0.969400_4_1.000000_0.100000_40000 & 4.19e+00 & 1.66e-04 & 7.32e-01 & 9.30e-08\\
RW_0.969600_4_1.000000_0.100000_40000 & 6.92e+00 & 1.72e-04 & 7.18e-01 & 4.24e-09\\
RW_0.969900_4_1.000000_0.100000_40000 & 2.74e+00 & 1.79e-04 & 6.93e-01 & 3.90e-09\\
random & 3.57e+08 & 2.77e-01 & -2.70e-01 & 0.00e+00\\
random2 & 1.63e+08 & 2.79e-01 & -2.34e-01 & 0.00e+00\\
text
COM_4_1.0_300_0.5_b & 2.75e+00 & 5.48e-01 & -4.83e-03 & 0.00e+00\\
COM_4_1.0_300_0.1_b & 2.83e+00 & 5.29e-01 & 1.87e-03 & 0.00e+00\\
COM_4_1.0_200_0.5_b & 2.82e+00 & 5.54e-01 & -9.90e-03 & 0.00e+00\\
COM_4_1.0_200_0.1_b & 3.09e+00 & 5.35e-01 & -2.79e-03 & 0.00e+00\\
RW_0.960000_4_0.100000_0.100000_40000 & 3.54e+00 & 4.40e-01 & -7.30e-03 & 0.00e+00\\
RW_0.964000_4_0.100000_0.100000_40000 & 3.80e+00 & 5.92e-01 & -5.64e-02 & 0.00e+00\\
RW_0.965000_4_0.100000_0.100000_40000 & 3.63e+00 & 6.19e-01 & -5.05e-02 & 0.00e+00\\
RW_0.970000_4_1.000000_0.100000_40000 & 3.57e+00 & 4.00e-01 & 4.55e-03 & 0.00e+00\\
RW_0.976000_4_2.000000_0.100000_40000 & 3.52e+00 & 4.63e-01 & -2.01e-02 & 0.00e+00\\
RW_0.969400_4_1.000000_0.100000_40000 & 3.46e+00 & 3.71e-01 & 1.89e-02 & 0.00e+00\\
RW_0.969600_4_1.000000_0.100000_40000 & 3.38e+00 & 3.81e-01 & 1.28e-02 & 0.00e+00\\
RW_0.969900_4_1.000000_0.100000_40000 & 3.45e+00 & 3.95e-01 & 8.18e-03 & 0.00e+00\\
random & 6.93e+00 & 9.65e-01 & -2.53e-02 & 0.00e+00\\
random2 & 7.28e+00 & 9.63e-01 & -2.20e-02 & 0.00e+00\\
"""
usa_small_files = [\
#"COM_4_1.0_3000_0.9_b",\
#"COM_4_1.0_3000_0.5_b",\
#"COM_4_1.0_3000_0.1_b",\
#"RW_0.951000_4_0.100000_0.100000_300000",\
#"RW_0.955000_4_0.100000_0.100000_300000",\
#"RW_0.965000_4_0.100000_0.100000_300000",\
#"RW_0.981000_4_1.000000_0.100000_300000",\
#"RW_0.985000_4_1.000000_0.100000_300000",\
#"RW_0.986000_4_2.000000_0.100000_300000",\
#"RW_0.987000_4_2.000000_0.100000_300000",\
#"random", "random2"
]
"""
location min min max max
COM_4_1.0_3000_0.9_b & 4.40e-02 & 4.68e-06 & 9.16e-01 & 2.18e-02\\
COM_4_1.0_3000_0.5_b & 3.19e-02 & -3.10e-07 & 9.51e-01 & 9.11e-03\\
COM_4_1.0_3000_0.1_b & 6.96e-01 & -5.73e-07 & 8.78e-01 & 6.15e-04\\
RW_0.951000_4_0.100000_0.100000_300000 & 7.09e+00 & 7.01e-05 & 6.79e-01 & 2.17e-12\\
RW_0.955000_4_0.100000_0.100000_300000 & 5.52e+03 & 1.35e-04 & 6.51e-01 & 1.94e-12\\
RW_0.965000_4_0.100000_0.100000_300000 & 1.18e+03 & 2.00e-04 & 5.94e-01 & 2.68e-12\\
RW_0.981000_4_1.000000_0.100000_300000 & 2.51e+04 & 1.12e-03 & 4.65e-01 & 4.04e-13\\
RW_0.985000_4_1.000000_0.100000_300000 & 1.93e+04 & 3.14e-03 & 3.15e-01 & 1.29e-12\\
RW_0.986000_4_2.000000_0.100000_300000 & 1.31e+04 & 1.81e-03 & 3.28e-01 & 3.70e-13\\
RW_0.987000_4_2.000000_0.100000_300000 & 6.09e+04 & 3.58e-03 & 2.89e-01 & 2.78e-13\\
random & 0.00e+00 & 0.00e+00 & 0.00e+00 & 0.00e+00\\
random2 & 1.29e+03 & 2.35e-01 & -1.41e-01 & 0.00e+00\\
text
COM_4_1.0_3000_0.9_b & 1.35e+00 & 1.96e-01 & 4.82e-01 & 0.00e+00\\
COM_4_1.0_3000_0.5_b & 1.37e+00 & 1.74e-01 & 5.20e-01 & 0.00e+00\\
COM_4_1.0_3000_0.1_b & 1.37e+00 & 1.42e-01 & 6.69e-01 & 0.00e+00\\
RW_0.951000_4_0.100000_0.100000_300000 & 1.28e+00 & 2.70e-01 & 4.13e-01 & 0.00e+00\\
RW_0.955000_4_0.100000_0.100000_300000 & 1.41e+00 & 2.69e-01 & 4.08e-01 & 0.00e+00\\
RW_0.965000_4_0.100000_0.100000_300000 & 1.44e+00 & 2.31e-01 & 4.04e-01 & 0.00e+00\\
RW_0.981000_4_1.000000_0.100000_300000 & 1.54e+00 & 3.92e-01 & 3.25e-01 & 0.00e+00\\
RW_0.985000_4_1.000000_0.100000_300000 & 1.52e+00 & 3.22e-01 & 2.60e-01 & 0.00e+00\\
RW_0.986000_4_2.000000_0.100000_300000 & 1.48e+00 & 4.06e-01 & 2.47e-01 & 0.00e+00\\
RW_0.987000_4_2.000000_0.100000_300000 & 1.48e+00 & 4.69e-01 & 2.20e-01 & 0.00e+00\\
random & 0.00e+00 & 0.00e+00 & 0.00e+00 & 0.00e+00\\
random2 & 3.90e+00 & 8.69e-01 & -2.05e-02 & 0.00e+00\\
"""
#fff = ["blobs","noisy_circles","noisy_moons","random"]
#eval_comp(col, fff)
#t_files = ["COM_4_1.0_200_0.5_b", "COM_4_1.0_200_0.1_b", "COM_4_1.0_300_0.1_b", "RW_0.960000_4_0.100000_0.100000_40000"]
#eval_comp(ger_small, ger_small_files, [6,7,8,9])
#eval_comp(ger, ger_files, [6,7,8,9])
#eval_comp(usa_small_1M, usa_small_files, [6,7,8,9])
sys.exit()
def random_clustering(nodes, clusters, noise=0.1):
import random
for nid in xrange(nodes):
if random.random() > noise:
print nid, 1+nid%(clusters-1)
# 57.554 ger_small
# 1.254.298 ger
#1.086.678 usa_small_1M
#random_clustering(57554, 150)
#random_clustering(1254298, 150)
#random_clustering(1086678, 150)
sys.exit()
#col_ger_small = "a56c48c3df72452177dce28efd790ddc"
#col_ger = "90bda4cc98289c9d3c231127f8253189"
#clustering_str = "/home/seydanator/Desktop/SVN/code/usa"#LocF_10000_8.txt"
#lf = LocF(10000, 8)
#jac = Jaccard(0.5, 8, "w")
#graph = RWalk(0.5, 8, 1, 1)
#com = Comb(lf, jac)
#print lf
#print jac
#print com
#args = ['./dbscan', 'scan', "", ""]
#args.extend(str(com).split())
#print args
#sys.exit()
#print lf
#print jac
#print com
#usa_jac = "USA_"+collection_usa+"_jac"
#usa_lf = "USA_"+collection_usa+"_lf"
#usa_com = "USA_"+collection_usa+"_com"
#gsg = "GER_SMALL"
#gs = "GER"
#borders = borders_by_collection(ger)
#print borders
#plot(ger_small, parse_clustering(gsg + "/LF_2000_6"), borders=borders, output=gsg+"/LF_2000_6.png", cluster_count=16)
from itertools import product
def cluster_RWalk(col, points, text_w, jump, epses, dists, text_t=100, ss=15000):
borders = borders_by_collection(col)
for pts, w, c, eps, dist in product(points, text_w, jump, epses, dists):
#cl_st = dir_col[col]+"/"+"RW_"+str(eps)+"_"+str(pts)+"_"+str(w)+"_"+str(c)+"_"+str(dist)
cl_st = "{}/RW_{:f}_{:d}_{:f}_{:f}_{:d}".format(dir_col[col], eps, pts, w, c, dist)
if not os.path.isfile(cl_st):
print cl_st
stat = cluster(col, RWalk(eps, pts, w, c, dist, text_t), cl_st)
if 3*stat["unclustered"] <= stat["clustered"]:
clusters = parse_clustering(cl_st)
for count in [25]:
print "plotting", count, ss
plot(col, clusters, show=False, output=cl_st+"_"+str(count), cluster_count=count, borders=borders, legend=False, shape_size=ss)
def cluster_LocF(col, points, epses, ss=15000):
borders = borders_by_collection(col)
for eps, pts in product(epses, points):
cl_st = dir_col[col]+"/"+"LF_"+str(eps)+"_"+str(pts)
if not os.path.isfile(cl_st):
stat = cluster(col, LocF(eps, pts), cl_st)
if 3*stat["unclustered"] <= stat["clustered"]:
clusters = parse_clustering(cl_st)
for count in [25]:
plot(col, clusters, show=False, output=cl_st+"_"+str(count), cluster_count=count, borders=borders, shape_size=ss)
def cluster_Jaccard(col, points, epses, kind, ss=15000):
borders = borders_by_collection(col)
for eps, pts in product(epses, points):
#cl_st = dir_col[col]+"/"+"JW_"+str(eps)+"_"+str(pts)+"_"+kind
cl_st = "{}/JW_{:f}_{:d}_{}".format(dir_col[col], eps, pts, kind)
if not os.path.isfile(cl_st):
stat = cluster(col, Jaccard(eps, pts, kind), cl_st)
if 3*stat["unclustered"] <= stat["clustered"]:
clusters = parse_clustering(cl_st)
for count in [25]:
plot(col, clusters, show=False, output=cl_st+"_"+str(count), cluster_count=count, borders=borders, shape_size=ss)
def cluster_Combined(col, points, epses, loc_epses, jac_epses, kind, ss=15000):
borders = borders_by_collection(col)
for pts, eps, loc_eps, jac_eps in product(points, epses, loc_epses, jac_epses):
cl_st = dir_col[col]+"/"+"COM_"+str(pts)+"_"+str(eps)+"_"+str(loc_eps)+"_"+str(jac_eps)+"_"+kind
if not os.path.isfile(cl_st):
l = LocF(loc_eps, pts)
j = Jaccard(jac_eps, pts, kind)
stat = cluster(col, Comb(eps, pts, l, j), cl_st)
if 3*stat["unclustered"] <= stat["clustered"]:
clusters = parse_clustering(cl_st)
for count in [25]:
plot(col, clusters, show=False, output=cl_st+"_"+str(count), cluster_count=count, borders=borders, shape_size=ss)
#print borders_by_collection(ger)
#[47.17342, 55.05963, 5.89202, 15.02338]
#print borders_by_collection(usa_small)
#[25.3716, 50.06222, -124.83338, -52.62152]
#print borders_by_collection(usa)
#[23.5, 52.99994, -129.72311, -51.17668]
#shape_size usa = 30000
#ger = 15000
#cluster_Combined(usa_small_1M, [4], [1.0], arange(2000, 100000 , 2000), [0.5], "b")
#cluster_Combined(ger_small, [4], [1.0],[100,200, 300, 500, 1000], [0.5], "b")
#cluster_LocF(ger_small, [8], arange(100, 501, 100))
#sys.exit(0)
#plot(usa_small_1M, parse_clustering("USA_SMALL_1M/LF_15000_8"), show=False, cluster_count=30, borders=borders_by_collection(usa_small_1M), shape_size=30000, legend=True)
#cluster_RWalk(usa_small_1M, [4], [0.1], [0.1], arange(0.95, .97, 0.001), [300000], 5, ss=50000)
#cluster_RWalk(usa_small_1M, [4], [1.0], [0.1], arange(0.98, .99, 0.001), [300000], 5, ss=50000)
#cluster_RWalk(usa_small_1M, [4], [2.0], [0.1], arange(0.98, .99, 0.001), [300000], 5, ss=50000)
#cluster_Combined(usa_small_1M, [4], [1.0],[2000, 3000, 5000, 7000], [0.1, 0.5, 0.9], "b", ss=50000)
#cluster_RWalk(ger, [4], [0.1], [0.1], arange(0.95, .97, 0.001), [35000], 5)
#cluster_RWalk(ger, [4], [1.0], [0.1], arange(0.97, .99, 0.001), [35000], 5)
#cluster_RWalk(ger, [4], [2.0], [0.1], arange(0.98, 1.0, 0.001), [35000], 5)
#cluster_Combined(ger, [4], [1.0],[100,200, 300, 500, 1000], [0.1, 0.5, 0.9], "b")
#cluster_RWalk(ger_small, [4], [0.1], [0.1], arange(0.96, .98, 0.001), [40000], 5)
#cluster_RWalk(ger_small, [4], [1.0], [0.1], arange(0.969, .97, 0.0001), [40000], 5)
#cluster_RWalk(ger_small, [4], [2.0], [0.1], arange(0.97, .98, 0.001), [40000], 5)
#cluster_Combined(ger_small, [4], [1.0],[100,200, 300, 500, 1000], [0.1, 0.5, 0.9], "b")
#try other scopes
#cluster_RWalk(usa_small_1M, [4], [2.0, 3.0], [0.01], arange(0.998, .999, 0.0001), [300000], 5, ss=50000)
#cluster_Combined(usa_small_1M, [4], [0.5, 0.7, 1.0, 1.2, 1.5, 2.0],[100000], [0.1], "b", ss=50000)
#cluster_Jaccard(usa_small_1M, [8], [0.4, 0.5, 0.6, 0.7], "b")#arange(0.9, 1.01, 0.01)
sys.exit()
#cluster_histogram(["/home/seydanator/Desktop/SVN/code/USA_SMALL_1M/LF_60000_8","/home/seydanator/Desktop/SVN/code/USA_SMALL_1M/LF_10000_8","/home/seydanator/Desktop/SVN/code/USA_SMALL_1M/LF_40000_8","/home/seydanator/Desktop/SVN/code/USA_SMALL_1M/LF_20000_8"])
#cluster_RWalk(ger_small_d, [4], [1.0], [0.1], arange(0.965, 0.97, 0.0005))
#triang_combiner(ger)
#
#from matplotlib import cm
#maps=[m for m in cm.datad if not m.endswith("_r")]
#maps.sort()
#l=len(maps)+1
#for i, m in enumerate(maps):
# edge_pic(usa_small_1M, cm=m)
#edge_pic(ger_small)
#edge_pic(ger)
sys.exit()
#from datetime import datetime
#for pair in [(10, 90),(100, 0), (90, 10), (900, 100), (50, 50), (500, 500), (75, 25), (25, 75)]:
# startTime = datetime.now()
# ev = eval_clustering(ger_small, "/home/seydanator/Desktop/SVN/code/GER_SMALL/LF_2000_4", pair[0], pair[1])
# print pair, datetime.now()-startTime
# print ev
#print ev['Geo']
#print ev['JBi']
#locf
#jac
def add_collection_points(arr, name):
def add_id(gen):
_id = 0
for en in gen:
en["_id"] = _id
yield en
_id += 1
tweet_list = []
tweet_counter = 0
write_buffer = 1000
for tweet in add_id(arr):
tweet_counter += 1
tweet_list.append(tweet)
if tweet_counter == write_buffer:
tweets[name].insert(tweet_list)
tweet_list = []
tweet_counter = 0
if tweet_counter > 0:
tweets[name].insert(tweet_list)
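# Example (hedged sketch): add_collection_points expects an iterable of dicts
# and a collection name; documents get sequential _id values and are inserted
# into MongoDB in batches of write_buffer. Hypothetical usage:
#
#   docs = ({"loc": [lon, lat], "tag": []} for lon, lat in coordinate_pairs)
#   add_collection_points(docs, "my_test_collection")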
#new_collection(ger, "ger_test")# sampling=(2,1))#count=30000)#
#new_collection(ger, "ger_test90k", sampling=(1,12), count=False)
#frequency_map("ger_test100k", borders_by_collection("ger_test100k"))
#cluster_RWalk(ger_small, [4], [1.1], [0.1], arange(0.95, 0.99, 0.01))
cluster_RWalk("ger_test90k", [4], [1.0], [0.1], arange(0.95, 0.99, 0.01))
sys.exit()
#cluster(collection_ger_small, graph, ger_small_graph)
#plot(collection_ger_small, ger_small_graph, show=False, output=ger_small_graph, cluster_count=30)
#cluster(collection_usa, jac, usa_jac)
#plot(collection_usa, usa_jac, show=False, output=usa_jac, cluster_count=8)
#cluster(collection_usa, lf, usa_lf)
#plot(collection_usa, usa_lf, show=False, output=usa_lf, cluster_count=8)
#cluster(collection_usa, com, usa_com)
#plot(collection_usa, usa_com, show=False, output=usa_com, cluster_count=8)
#evaluation_lf = eval_clustering(collection_usa, usa_lf)
#print evaluation_lf
#evaluation_jac = eval_clustering(collection_usa, usa_jac)
#print evaluation_jac
#evaluation_com = eval_clustering(collection_usa, usa_com)
#print evaluation_com
#print evaluation
sys.exit()
from sklearn import datasets
from sklearn.metrics import euclidean_distances
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5, noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
np.random.seed(0)
no_structure = np.random.rand(n_samples, 2), None
#sys.exit()
import pymongo
connection = pymongo.MongoClient()
tweets = connection['tweets']
def add_points(arr, name):
def add_id(gen):
_id = 0
for pair in gen:
elem = {}
elem["_id"] = _id
x,y = pair
elem["loc"] = [x,y]
elem["tag"] = []
yield elem
_id += 1
tweet_list = []
tweet_counter = 0
write_buffer = 1000
for tweet in add_id(arr):
tweet_counter += 1
tweet_list.append(tweet)
if tweet_counter == write_buffer:
tweets[name].insert(tweet_list)
tweet_list = []
tweet_counter = 0
if tweet_counter > 0:
tweets[name].insert(tweet_list)
add_points(StandardScaler().fit_transform(noisy_circles[0]), "noisy_circles")
add_points(StandardScaler().fit_transform(noisy_moons[0]), "noisy_moons")
add_points(StandardScaler().fit_transform(blobs[0]), "blobs")
add_points(StandardScaler().fit_transform(no_structure[0]), "random")
with open("random", "w") as ran:
for i in xrange(n_samples):
ran.write("%s %s\n" % (i,(i%2)))
for name in ["noisy_circles", "noisy_moons", "blobs"]:
cluster(name, LocF(5000.0, 2), name)
for name in ["noisy_circles", "noisy_moons", "blobs", "random"]:
ev = eval_clustering(name, name, 1, 0)
print ev
plot(name, parse_clustering(name), show=False, scatter=False, colored=True, output="test/"+name+"_color_2000",shape_size=2000)
sys.exit()
| gpl-2.0 |
mira67/TakeoutDataAnalysis | python/mean_shift_n.py | 1 | 19555 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
import six
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_random_state, gen_batches, check_array
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import pairwise_distances_argmin,euclidean_distances
from joblib import Parallel,delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0,
n_jobs=1):
"""Estimate the bandwidth to use with the mean-shift algorithm.
Note that this function takes time at least quadratic in n_samples. For large
datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
Should be in the range [0, 1]; 0.5 means that the median of all pairwise
distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
X = check_array(X)
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile),
n_jobs=n_jobs)
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
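# Example (hedged): typical use of estimate_bandwidth on synthetic data,
# assuming scikit-learn's make_blobs is available; values are illustrative.
#
#   from sklearn.datasets import make_blobs
#   X, _ = make_blobs(n_samples=500, centers=3, random_state=0)
#   bw = estimate_bandwidth(X, quantile=0.2, n_samples=200)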
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter,kernel, gamma, computed_weights, weights):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
#compute weights
if computed_weights:
weights_within = np.reciprocal(weights[i_nbrs])
else:
weights_within = None
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
#my_mean = np.mean(points_within, axis=0)
my_mean = _kernel_update(my_old_mean, points_within, bandwidth,
kernel, gamma, computed_weights, weights_within)
# If converged or at max_iter, adds the cluster
if (np.linalg.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
n_jobs=1,kernel='flat', gamma=1.0,computed_weights = False, weights = None):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if it has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
.. versionadded:: 0.17
Parallel Execution using *n_jobs*.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
For an example, see :ref:`examples/cluster/plot_mean_shift.py
<sphx_glr_auto_examples_cluster_plot_mean_shift.py>`.
"""
if bandwidth is None:
bandwidth = estimate_bandwidth(X, n_jobs=n_jobs)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=n_jobs).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter,kernel, gamma, computed_weights, weights) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth,
n_jobs=n_jobs).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1, n_jobs=n_jobs).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
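# Example (hedged): calling the functional interface directly with the rbf
# kernel; the array X and the parameter values are illustrative only.
#
#   centers, labels = mean_shift(X, bandwidth=2.0, bin_seeding=True,
#                                kernel='rbf', gamma=1.0)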
def _kernel_update(old_cluster_center, points, bandwidth, kernel, gamma, computed_weights, weights):
""" Update cluster center according to kernel used
Parameters
----------
old_cluster_center : array_like, shape=[n_features]
The location of the old centroid candidate.
points : array_like, shape=[n_samples, n_features]
All data points that fall within the window of which the new mean
is to be computed.
bandwidth : float
Size of the kernel. All points within this range are used to
compute the new mean. There should be no points outside this range
in the points variable.
kernel : string
The kernel used for updating the window center. Available are:
'flat', 'rbf', 'epanechnikov', 'biweight' and 'takeout'
gamma : float
Controls the width of the rbf kernel. Only used with the rbf kernel.
Lower values make it more like the flat kernel; higher values give
points closer to the old center more weight.
Notes
-----
Kernels as mentioned in the paper "Mean Shift, Mode Seeking, and
Clustering" by Yizong Cheng, published in IEEE Transaction On Pattern
Analysis and Machine Intelligence, vol. 17, no. 8, august 1995.
The rbf or Gaussian kernel is a truncated version, since in this
implementation of Mean Shift clustering only the points within the
range of bandwidth around old_cluster_center are fed into the points
variable.
"""
# The flat kernel gives all points within range equal weight
# No extra calculations needed
if kernel == 'flat':
weighted_mean = np.mean(points, axis=0)
elif kernel == 'takeout':
# Compute new mean
distances = euclidean_distances(points, old_cluster_center)
weights_within = weights*np.exp(-1 * 1.0 * (distances ** 2))
weighted_mean = np.sum(points * weights_within, axis=0) / np.sum(weights_within)
else:
# Define the weights function for each kernel
if kernel == 'rbf':
compute_weights = lambda p, b: np.exp(-1 * gamma * (p ** 2))
elif kernel == 'epanechnikov':
compute_weights = lambda p, b: 1.0 - (p ** 2 / b ** 2)
elif kernel == 'biweight':
compute_weights = lambda p, b: (1.0 - (p ** 2 / b ** 2)) ** 2
# ('takeout' is handled in its own branch above and can never reach
# this else-block, so no weight function is defined for it here)
else:
implemented_kernels = ['flat', 'rbf', 'epanechnikov', 'biweight', 'takeout']
raise ValueError("Unknown kernel, please use one of: %s" %
", ".join(implemented_kernels))
# Compute new mean
distances = euclidean_distances(points, old_cluster_center)
weights = compute_weights(distances, bandwidth)
weighted_mean = np.sum(points * weights, axis=0) / np.sum(weights)
return weighted_mean
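# Worked example (hedged): with the 'rbf' kernel and gamma=1.0, a point at
# distance d from the old center gets weight exp(-d**2), e.g. d=0.0 -> 1.0,
# d=1.0 -> ~0.368, d=2.0 -> ~0.018; the new mean is the weight-normalized
# average of all points inside the bandwidth.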
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
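# Example (hedged): with bin_size=1.0 the points [1.1, 1.2], [1.4, 0.9] and
# [3.1, 3.0] round to bins (1, 1), (1, 1) and (3, 3); with min_bin_freq=2
# only the first bin qualifies, so a single seed [1., 1.] is returned
# (bin coordinates scaled back by bin_size).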
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
a Ball Tree to look up members of each kernel, the complexity will tend
towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1,kernel='flat',gamma=1.0,computed_weights = False, weights=None):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
self.kernel = kernel
self.gamma = gamma
self.computed_weights = computed_weights
self.weights = weights
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs,kernel=self.kernel,gamma=self.gamma,
computed_weights = self.computed_weights,
weights = self.weights)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
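# Example (hedged): estimator-style usage of this modified MeanShift with a
# non-flat kernel; the array X and the parameter values are illustrative only.
#
#   ms = MeanShift(bandwidth=2.0, bin_seeding=True, kernel='epanechnikov')
#   ms.fit(X)
#   centers, labels = ms.cluster_centers_, ms.labels_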
| gpl-3.0 |
NSLS-II-CHX/ipython_ophyd | profile_collection/ipython_qtconsole_config.py | 13 | 24674 | # Configuration file for ipython-qtconsole.
c = get_config()
#------------------------------------------------------------------------------
# IPythonQtConsoleApp configuration
#------------------------------------------------------------------------------
# IPythonQtConsoleApp will inherit config from: BaseIPythonApplication,
# Application, IPythonConsoleApp, ConnectionFileMixin
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPythonQtConsoleApp.ip = u''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPythonQtConsoleApp.verbose_crash = False
# Start the console window maximized.
# c.IPythonQtConsoleApp.maximize = False
# The date format used by logging formatters for %(asctime)s
# c.IPythonQtConsoleApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPythonQtConsoleApp.shell_port = 0
# The SSH server to use to connect to the kernel.
# c.IPythonQtConsoleApp.sshserver = ''
# set the stdin (DEALER) port [default: random]
# c.IPythonQtConsoleApp.stdin_port = 0
# Set the log level by value or name.
# c.IPythonQtConsoleApp.log_level = 30
# Path to the ssh key to use for logging in to the ssh server.
# c.IPythonQtConsoleApp.sshkey = ''
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPythonQtConsoleApp.extra_config_file = u''
# Whether to create profile dir if it doesn't exist
# c.IPythonQtConsoleApp.auto_create = False
# path to a custom CSS stylesheet
# c.IPythonQtConsoleApp.stylesheet = ''
# set the heartbeat port [default: random]
# c.IPythonQtConsoleApp.hb_port = 0
# Whether to overwrite existing config files when copying
# c.IPythonQtConsoleApp.overwrite = False
# set the iopub (PUB) port [default: random]
# c.IPythonQtConsoleApp.iopub_port = 0
# The IPython profile to use.
# c.IPythonQtConsoleApp.profile = u'default'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPythonQtConsoleApp.connection_file = ''
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.IPythonQtConsoleApp.confirm_exit = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPythonQtConsoleApp.ipython_dir = u''
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPythonQtConsoleApp.copy_config_files = False
# Connect to an already running kernel
# c.IPythonQtConsoleApp.existing = ''
# Use a plaintext widget instead of rich text (plain can't print/save).
# c.IPythonQtConsoleApp.plain = False
# Start the console window with the menu bar hidden.
# c.IPythonQtConsoleApp.hide_menubar = False
# The Logging format template
# c.IPythonQtConsoleApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#
# c.IPythonQtConsoleApp.transport = 'tcp'
#------------------------------------------------------------------------------
# IPythonWidget configuration
#------------------------------------------------------------------------------
# A FrontendWidget for an IPython kernel.
# IPythonWidget will inherit config from: FrontendWidget, HistoryConsoleWidget,
# ConsoleWidget
# The type of completer to use. Valid values are:
#
# 'plain' : Show the available completion as a text list
# Below the editing area.
# 'droplist': Show the completion in a drop down list navigable
# by the arrow keys, and from which you can select
# completion by pressing Return.
# 'ncurses' : Show the completion as a text list which is navigable by
# `tab` and arrow keys.
# c.IPythonWidget.gui_completion = 'ncurses'
# Whether to process ANSI escape codes.
# c.IPythonWidget.ansi_codes = True
# A CSS stylesheet. The stylesheet can contain classes for:
# 1. Qt: QPlainTextEdit, QFrame, QWidget, etc
# 2. Pygments: .c, .k, .o, etc. (see PygmentsHighlighter)
# 3. IPython: .error, .in-prompt, .out-prompt, etc
# c.IPythonWidget.style_sheet = u''
# The height of the console at start time in number of characters (will double
# with `vsplit` paging)
# c.IPythonWidget.height = 25
#
# c.IPythonWidget.out_prompt = 'Out[<span class="out-prompt-number">%i</span>]: '
#
# c.IPythonWidget.input_sep = '\n'
# Whether to draw information calltips on open-parentheses.
# c.IPythonWidget.enable_calltips = True
#
# c.IPythonWidget.in_prompt = 'In [<span class="in-prompt-number">%i</span>]: '
# The width of the console at start time in number of characters (will double
# with `hsplit` paging)
# c.IPythonWidget.width = 81
# A command for invoking a system text editor. If the string contains a
# {filename} format specifier, it will be used. Otherwise, the filename will be
# appended to the end the command.
# c.IPythonWidget.editor = ''
# If not empty, use this Pygments style for syntax highlighting. Otherwise, the
# style sheet is queried for Pygments style information.
# c.IPythonWidget.syntax_style = u''
# The font family to use for the console. On OSX this defaults to Monaco, on
# Windows the default is Consolas with fallback of Courier, and on other
# platforms the default is Monospace.
# c.IPythonWidget.font_family = u''
# The pygments lexer class to use.
# c.IPythonWidget.lexer_class = <IPython.utils.traitlets.Undefined object at 0x1866810>
#
# c.IPythonWidget.output_sep2 = ''
# Whether to automatically execute on syntactically complete input.
#
# If False, Shift-Enter is required to submit each execution. Disabling this is
# mainly useful for non-Python kernels, where the completion check would be
# wrong.
# c.IPythonWidget.execute_on_complete_input = True
# The maximum number of lines of text before truncation. Specifying a non-
# positive number disables text truncation (not recommended).
# c.IPythonWidget.buffer_size = 500
#
# c.IPythonWidget.history_lock = False
#
# c.IPythonWidget.banner = u''
# The type of underlying text widget to use. Valid values are 'plain', which
# specifies a QPlainTextEdit, and 'rich', which specifies a QTextEdit.
# c.IPythonWidget.kind = 'plain'
# Whether to ask for user confirmation when restarting kernel
# c.IPythonWidget.confirm_restart = True
# The font size. If unconfigured, Qt will be entrusted with the size of the
# font.
# c.IPythonWidget.font_size = 0
# The editor command to use when a specific line number is requested. The string
# should contain two format specifiers: {line} and {filename}. If this parameter
# is not specified, the line number option to the %edit magic will be ignored.
# c.IPythonWidget.editor_line = u''
# Whether to clear the console when the kernel is restarted
# c.IPythonWidget.clear_on_kernel_restart = True
# The type of paging to use. Valid values are:
#
# 'inside'
# The widget pages like a traditional terminal.
# 'hsplit'
# When paging is requested, the widget is split horizontally. The top
# pane contains the console, and the bottom pane contains the paged text.
# 'vsplit'
# Similar to 'hsplit', except that a vertical splitter is used.
# 'custom'
# No action is taken by the widget beyond emitting a
# 'custom_page_requested(str)' signal.
# 'none'
# The text is written directly to the console.
# c.IPythonWidget.paging = 'inside'
#
# c.IPythonWidget.output_sep = ''
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the IP or interface on which the kernel will listen.
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = 'IPython.kernel.zmq.ipkernel.Kernel'
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
#
# c.IPKernelApp.parent_appname = u''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the IPython command
# line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = u''
#
# c.KernelManager.transport = 'tcp'
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'swilkins'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
#------------------------------------------------------------------------------
# InlineBackend configuration
#------------------------------------------------------------------------------
# An object to store configuration of the inline backend.
# The figure format to enable (deprecated use `figure_formats` instead)
# c.InlineBackend.figure_format = u''
# A set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
# c.InlineBackend.figure_formats = set(['png'])
# Extra kwargs to be passed to fig.canvas.print_figure.
#
# Logical examples include: bbox_inches, quality (for jpeg figures), etc.
# c.InlineBackend.print_figure_kwargs = {'bbox_inches': 'tight'}
# Close all figures at the end of each cell.
#
# When True, ensures that each cell starts with no active figures, but it also
# means that one must keep track of references in order to edit or redraw
# figures in subsequent cells. This mode is ideal for the notebook, where
# residual plots from other cells might be surprising.
#
# When False, one must call figure() to create new figures. This means that
# gcf() and getfigs() can reference figures created in other cells, and the
# active figure can continue to be edited with pylab/pyplot methods that
# reference the current active figure. This mode facilitates iterative editing
# of figures, and behaves most consistently with other matplotlib backends, but
# figure barriers between cells must be explicit.
# c.InlineBackend.close_figures = True
# Subset of matplotlib rcParams that should be different for the inline backend.
# c.InlineBackend.rc = {'font.size': 10, 'figure.figsize': (6.0, 4.0), 'figure.facecolor': (1, 1, 1, 0), 'savefig.dpi': 72, 'figure.subplot.bottom': 0.125, 'figure.edgecolor': (1, 1, 1, 0)}
| bsd-2-clause |
rfleissner/ChEsher | py/modules/moduleScalarDXF.py | 1 | 8385 | #!/usr/bin/python -d
#
# Copyright (C) 2016 Reinhard Fleissner
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""Wrapper for module ScalarDXF"""
__author__="Reinhard Fleissner"
__date__ ="$18.05.2016 22:38:30$"
import os
import functools
from math import ceil, floor
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.tri as tri
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QMessageBox, QFileDialog
# modules and classes
from uiScalarDXF import Ui_ScalarDXF
import fileHandler as fh
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class WrapScalarDXF():
"""Wrapper for module ScalarDXF"""
def __init__(self):
"""Constructor."""
# setup user interface
self.widget = QtGui.QWidget()
self.ui = Ui_ScalarDXF()
self.ui.setupUi(self.widget)
self.directory = os.path.abspath('.')
# module ScalarDXF
self.callbackOpenScalarInputT3SMajor = functools.partial(self.getOpenFileName, "Open 2D T3 Scalar Mesh", "2D T3 Scalar Mesh (ASCIISingleFrame) (*.t3s)", self.ui.lineEditInputT3SMajor)
QtCore.QObject.connect(self.ui.pushButtonInputT3SMajor, QtCore.SIGNAL(_fromUtf8("clicked()")), self.callbackOpenScalarInputT3SMajor)
self.callbackOpenScalarInputT3SMinor = functools.partial(self.getOpenFileName, "Open 2D T3 Scalar Mesh", "2D T3 Scalar Mesh (ASCIISingleFrame) (*.t3s)", self.ui.lineEditInputT3SMinor)
QtCore.QObject.connect(self.ui.pushButtonInputT3SMinor, QtCore.SIGNAL(_fromUtf8("clicked()")), self.callbackOpenScalarInputT3SMinor)
self.callbackScalarScalar = functools.partial(self.getSaveFileName, "Save DXF-file As", "Drawing Interchange File (*.dxf)", self.ui.lineEditOutput)
QtCore.QObject.connect(self.ui.pushButtonOutput, QtCore.SIGNAL(_fromUtf8("clicked()")), self.callbackScalarScalar)
self.ui.spinBoxScale.valueChanged.connect(self.setScale)
self.scalarSymbol = 0
self.callbackCircle = functools.partial(self.setSymbol, 0)
QtCore.QObject.connect(self.ui.radioButtonCircle, QtCore.SIGNAL("clicked()"), self.callbackCircle)
self.callbackCross = functools.partial(self.setSymbol, 1)
QtCore.QObject.connect(self.ui.radioButtonCross, QtCore.SIGNAL("clicked()"), self.callbackCross)
self.callbackCrosshairs = functools.partial(self.setSymbol, 2)
QtCore.QObject.connect(self.ui.radioButtonCrosshairs, QtCore.SIGNAL("clicked()"), self.callbackCrosshairs)
self.callbackNone = functools.partial(self.setSymbol, 3)
QtCore.QObject.connect(self.ui.radioButtonNone, QtCore.SIGNAL("clicked()"), self.callbackNone)
QtCore.QObject.connect(self.ui.pushButtonCreate, QtCore.SIGNAL("clicked()"), self.create)
def setDir(self, directory):
self.directory = directory
print "set", self.directory
def initialize(self):
import os
abs_path = os.path.abspath('.')
dir = os.path.join(abs_path, 'examples/').replace('\\', '/')
### ~ module ScalarDXF ~ ###
self.ui.lineEditInputT3SMajor.setText(dir + "example_05/WATER DEPTH_S161_Case_A.t3s")
self.ui.lineEditInputT3SMinor.setText(dir + "example_05/WATER DEPTH_S161_Case_B.t3s")
self.ui.doubleSpinBoxDX.setValue(50.0)
self.ui.doubleSpinBoxDY.setValue(50.0)
self.ui.doubleSpinBoxSizeFactor.setValue(7.5)
self.ui.checkBoxMonochrome.setChecked(True)
self.ui.radioButtonCircle.setChecked(False)
self.ui.radioButtonCrosshairs.setChecked(True)
self.setSymbol(2)
self.ui.lineEditOutput.setText(dir + "example_05/output/water_depth.dxf")
def create(self):
info = ""
dx = self.ui.doubleSpinBoxDX.value()
dy = self.ui.doubleSpinBoxDY.value()
SMin = self.ui.doubleSpinBoxSMin.value()
SMax = self.ui.doubleSpinBoxSMax.value()
scale = self.ui.doubleSpinBoxSizeFactor.value()
eps = self.ui.doubleSpinBoxLessThan.value()
# read input meshes
try:
x, y, zMajor, triangles, boundaries = fh.readT3STriangulation(self.ui.lineEditInputT3SMajor.text())
except Exception, e:
QMessageBox.critical(self.widget, "Error", "Not able to load mesh file!\nCheck filename or content!" + "\n\n" + str(e))
return
minor = False
if self.ui.lineEditInputT3SMinor.text() != "":
minor = True
try:
x, y, zMinor, triangles, boundaries = fh.readT3STriangulation(self.ui.lineEditInputT3SMinor.text())
except Exception, e:
QMessageBox.critical(self.widget, "Error", "Not able to load mesh file!\nCheck filename or content!" + "\n\n" + str(e))
return
scalarNodes = {}
sCounter = 0
xMin = min(x)
xMax = max(x)
yMin = min(y)
yMax = max(y)
triang = tri.Triangulation(x, y, triangles)
# Interpolate to regularly-spaced quad grid.
# origin of scalar
x0 = floor(xMin/dx)*dx
y0 = floor(yMin/dy)*dy
# number of nodes in x- and y-direction
nx = int(ceil(xMax/dx) - floor(xMin/dx))
ny = int(ceil(yMax/dy) - floor(yMin/dy))
xGrid, yGrid = np.meshgrid(np.linspace(x0, x0+nx*dx, nx+1), np.linspace(y0, y0+ny*dy, ny+1))
info += " - Grid created with {0} x {1} points:\n\t- dx = {2}\n\t- dy = {3}\n\t- x(min) = {4}\n\t- y(min) = {5}\n\t- x(max) = {6}\n\t- y(max) = {7}\n".format(nx, ny, dx, dy, x0, y0, x0+nx*dx, y0+ny*dy)
interpLinMajor = tri.LinearTriInterpolator(triang, zMajor)
zGridMaj = interpLinMajor(xGrid, yGrid)
zGridMin = []
if minor is True:
interpLinMinor = tri.LinearTriInterpolator(triang, zMinor)
zGridMin = interpLinMinor(xGrid, yGrid)
for iy in range(len(xGrid)):
for ix in range(len(xGrid[0])):
if minor is True:
scalarNodes[sCounter] = [xGrid[iy][ix], yGrid[iy][ix], zGridMaj[iy][ix], zGridMin[iy][ix]]
sCounter += 1
else:
scalarNodes[sCounter] = [xGrid[iy][ix], yGrid[iy][ix], zGridMaj[iy][ix], None]
sCounter += 1
useMono = self.ui.checkBoxMonochrome.isChecked()
fname = self.ui.lineEditOutput.text()
info += "\n - Number of interpolated values: {0}".format(len(scalarNodes))
try:
nOfValues = fh.writeScalarDXF(scalarNodes, SMin, SMax, eps, scale, self.scalarSymbol, useMono, fname)
info += "\n - {0} values written to {1}".format(nOfValues, fname)
except Exception, e:
QMessageBox.critical(self.widget, "Error", "Not able to write DXF file!" + "\n\n" + str(e))
return
QMessageBox.information(self.widget, "Module ScalarDXF", info)
def setSymbol(self, i):
self.scalarSymbol = i
def setScale(self):
scale = self.ui.spinBoxScale.value()
d = scale/100.0
size_factor = scale*3.0/2000.0
self.ui.doubleSpinBoxDX.setValue(d)
self.ui.doubleSpinBoxDY.setValue(d)
self.ui.doubleSpinBoxSizeFactor.setValue(size_factor)
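# Worked example (hedged): setScale derives grid spacing and symbol size from
# the chosen plot scale; e.g. a scale of 5000 gives d = 5000/100.0 = 50.0
# (applied to both dx and dy) and size_factor = 5000*3.0/2000.0 = 7.5, which
# matches the defaults set in initialize().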
def getOpenFileName(self, title, fileFormat, lineEdit):
filename = QFileDialog.getOpenFileName(self.widget, title, self.directory, fileFormat)
if filename != "":
lineEdit.setText(filename)
def getSaveFileName(self, title, fileFormat, lineEdit):
filename = QFileDialog.getSaveFileName(self.widget, title, self.directory, fileFormat)
if filename != "":
lineEdit.setText(filename) | gpl-2.0 |
evgchz/scikit-learn | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| bsd-3-clause |
wdm0006/sklearn-extensions | sklearn_extensions/kernel_regression/kr.py | 1 | 3307 | """The :mod:`sklearn.kernel_regressor` module implements the Kernel Regressor.
"""
# Author: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.base import BaseEstimator, RegressorMixin
class KernelRegression(BaseEstimator, RegressorMixin):
"""Nadaraya-Watson kernel regression with automatic bandwidth selection.
This implements Nadaraya-Watson kernel regression with (optional) automatic
bandwith selection of the kernel via leave-one-out cross-validation. Kernel
regression is a simple non-parametric kernelized technique for learning
a non-linear relationship between input variable(s) and a target variable.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF ("bandwidth"), polynomial,
        exponential, chi2 and sigmoid kernels. Interpretation of the default
value is left to the kernel; see the documentation for
sklearn.metrics.pairwise. Ignored by other kernels. If a sequence of
values is given, one of these values is selected which minimizes
the mean-squared-error of leave-one-out cross-validation.
See also
--------
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None):
self.kernel = kernel
self.gamma = gamma
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values
Returns
-------
self : object
Returns self.
"""
self.X = X
self.y = y
if hasattr(self.gamma, "__iter__"):
self.gamma = self._optimize_gamma(self.gamma)
return self
def predict(self, X):
"""Predict target values for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted target value.
"""
K = pairwise_kernels(self.X, X, metric=self.kernel, gamma=self.gamma)
return (K * self.y[:, None]).sum(axis=0) / K.sum(axis=0)
def _optimize_gamma(self, gamma_values):
# Select specific value of gamma from the range of given gamma_values
# by minimizing mean-squared error in leave-one-out cross validation
        mse = np.empty_like(gamma_values, dtype=float)
for i, gamma in enumerate(gamma_values):
K = pairwise_kernels(self.X, self.X, metric=self.kernel, gamma=gamma)
np.fill_diagonal(K, 0) # leave-one-out
Ky = K * self.y[:, np.newaxis]
y_pred = Ky.sum(axis=0) / K.sum(axis=0)
mse[i] = ((y_pred - self.y) ** 2).mean()
return gamma_values[np.nanargmin(mse)] | bsd-3-clause |
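# Self-contained sketch (toy data, arbitrary gamma) of the Nadaraya-Watson
# estimate used by KernelRegression.predict above: a kernel-weighted average
# of the training targets.
import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels

rng = np.random.RandomState(0)
X_train = np.sort(5 * rng.rand(50, 1), axis=0)
y_train = np.sin(X_train).ravel() + 0.1 * rng.randn(50)
X_test = np.linspace(0, 5, 10)[:, np.newaxis]

K = pairwise_kernels(X_train, X_test, metric='rbf', gamma=1.0)   # (50, 10)
y_pred = (K * y_train[:, None]).sum(axis=0) / K.sum(axis=0)      # (10,)
print(y_pred.shape)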
pyrocko/pyrocko | test/base/test_gmtpy.py | 1 | 11149 | from __future__ import division, print_function, absolute_import
import os
import math
import tempfile
import shutil
import unittest
import numpy as num
from numpy.testing import assert_allclose
from matplotlib import image, pyplot as plt
from pyrocko import util
from pyrocko.plot import gmtpy
from pyrocko.plot.gmtpy import cm, inch, golden_ratio
from .. import common
plot = False
@unittest.skipUnless(
gmtpy.have_gmt(), 'GMT not available')
@unittest.skipUnless(
gmtpy.have_pixmaptools(), '`pdftocairo` or `convert` not available')
class GmtPyTestCase(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
def fpath(self, fn):
return os.path.join(self.tempdir, fn)
def fpath_ref(self, fn):
try:
return common.test_data_file(fn)
except util.DownloadError:
return common.test_data_file_no_download(fn)
def compare_with_ref(self, fname, tolerance, show=False):
fpath = self.fpath(fname)
fpath_ref = self.fpath_ref(fname)
if not os.path.exists(fpath_ref):
shutil.copy(fpath, fpath_ref)
img = image.imread(fpath)
img_ref = image.imread(fpath_ref)
self.assertEqual(img.shape, img_ref.shape)
d = num.abs(img - img_ref)
merr = num.mean(d)
if (merr > tolerance or show) and plot:
fig = plt.figure()
axes1 = fig.add_subplot(1, 3, 1, aspect=1.)
axes2 = fig.add_subplot(1, 3, 2, aspect=1.)
axes3 = fig.add_subplot(1, 3, 3, aspect=1.)
axes1.imshow(img)
axes1.set_title('Candidate')
axes2.imshow(img_ref)
axes2.set_title('Reference')
axes3.imshow(d)
axes3.set_title('Mean abs difference: %g' % merr)
plt.show()
plt.close(fig)
assert merr <= tolerance
def test_basic(self):
for version in gmtpy.all_installed_gmt_versions():
width = 8.0 * inch
height = 9.0 * inch
resolution = 72
gmt = gmtpy.GMT(version=version, config_papersize=(width, height))
gmt.pscoast(
X=0,
Y=0,
R='g',
J='E32/30/170/8i',
B='10g10',
D='c',
A=10000,
S=(114, 159, 207),
G=(233, 185, 110),
W='thinnest')
gmt.dump('test')
gmt.load('test')
for oversample in (1, 2):
fname = 'gmtpy_test_basic_o%i.png' % oversample
fpath = self.fpath(fname)
gmt.save(fpath, resolution=resolution, oversample=oversample)
self.compare_with_ref(fname, 0.03)
img = image.imread(fpath, format='png')
self.assertEqual(img.shape, (
int(round(resolution*height/inch)),
int(round(resolution*width/inch)), 3))
def test_basic2(self):
for version in gmtpy.all_installed_gmt_versions():
if version.startswith('5'):
gmt = gmtpy.GMT(
version=version,
config={'MAP_FRAME_TYPE': 'fancy'},
eps_mode=True)
else:
gmt = gmtpy.GMT(
version=version,
config={'BASEMAP_TYPE': 'fancy'})
layout = gmt.default_layout()
widget = layout.get_widget()
xax = gmtpy.Ax(label='Lon', mode='min-max')
yax = gmtpy.Ax(label='Lat', mode='min-max')
scaler = gmtpy.ScaleGuru([([5, 15], [52, 58])], axes=(xax, yax))
par = scaler.get_params()
lon0 = (par['xmin'] + par['xmax'])/2.
lat0 = (par['ymin'] + par['ymax'])/2.
sll = '%g/%g' % (lon0, lat0)
widget['J'] = '-JM' + sll + '/%(width)gp'
scaler['B'] = \
'-B%(xinc)gg%(xinc)g:%(xlabel)s:' \
'/%(yinc)gg%(yinc)g:%(ylabel)s:WSen'
aspect = gmtpy.aspect_for_projection(
version, *(widget.J() + scaler.R()))
aspect = 1.045
widget.set_aspect(aspect)
gmt.pscoast(D='h', W='1p,red', *(widget.JXY() + scaler.R()))
gmt.psbasemap(*(widget.JXY() + scaler.BR()))
fname = 'gmtpy_test_basic2.png'
fpath = self.fpath(fname)
gmt.save(fpath, resolution=75, bbox=layout.bbox())
self.compare_with_ref(fname, 0.01, show=False)
def test_layout(self):
x = num.linspace(0., math.pi*6, 1001)
y1 = num.sin(x) * 1e-9
y2 = 2.0 * num.cos(x) * 1e-9
xax = gmtpy.Ax(label='Time', unit='s')
yax = gmtpy.Ax(
label='Amplitude', unit='m', scaled_unit='nm',
scaled_unit_factor=1e9, approx_ticks=5, space=0.05)
guru = gmtpy.ScaleGuru([(x, y1), (x, y2)], axes=(xax, yax))
for version in gmtpy.all_installed_gmt_versions():
width = 8*inch
height = 3*inch
gmt = gmtpy.GMT(
version=version,
config_papersize=(width, height))
layout = gmt.default_layout()
widget = layout.get_widget()
gmt.draw_layout(layout)
gmt.psbasemap(*(widget.JXY() + guru.RB(ax_projection=True)))
gmt.psxy(
in_columns=(x, y1), W='1p,red', *(widget.JXY() + guru.R()))
gmt.psxy(
in_columns=(x, y2), W='1p,blue', *(widget.JXY() + guru.R()))
fname = 'gmtpy_test_layout.png'
fpath = self.fpath(fname)
gmt.save(fpath)
self.compare_with_ref(fname, 0.01)
def test_grid_layout(self):
for version in gmtpy.all_installed_gmt_versions():
gmt = gmtpy.GMT(version=version, config_papersize='a3')
nx, ny = 2, 5
grid = gmtpy.GridLayout(nx, ny)
layout = gmt.default_layout()
layout.set_widget('center', grid)
widgets = []
for iy in range(ny):
for ix in range(nx):
inner = gmtpy.FrameLayout()
inner.set_fixed_margins(
1.*cm*golden_ratio, 1.*cm*golden_ratio, 1.*cm, 1.*cm)
grid.set_widget(ix, iy, inner)
inner.set_vertical(0, (iy+1.))
widgets.append(inner.get_widget('center'))
gmt.draw_layout(layout)
for widget in widgets:
x = num.linspace(0., 10., 5)
y = num.sin(x)
xax = gmtpy.Ax(approx_ticks=4, snap=True)
yax = gmtpy.Ax(approx_ticks=4, snap=True)
guru = gmtpy.ScaleGuru([(x, y)], axes=(xax, yax))
gmt.psbasemap(*(widget.JXY() + guru.RB(ax_projection=True)))
gmt.psxy(in_columns=(x, y), *(widget.JXY() + guru.R()))
fname = 'gmtpy_test_grid_layout.png'
fpath = self.fpath(fname)
gmt.save(fpath, resolution=75)
self.compare_with_ref(fname, 0.01)
def test_simple(self):
x = num.linspace(0., 2*math.pi)
y = num.sin(x)
y2 = num.cos(x)
for version in gmtpy.all_installed_gmt_versions():
for ymode in ['off', 'symmetric', 'min-max', 'min-0', '0-max']:
plot = gmtpy.Simple(gmtversion=version, ymode=ymode)
plot.plot((x, y), '-W1p,%s' % gmtpy.color('skyblue2'))
plot.plot((x, y2), '-W1p,%s' % gmtpy.color(
gmtpy.color_tup('scarletred2')))
plot.text((3., 0.5, 'hello'), size=20.)
fname = 'gmtpy_test_simple_%s.png' % ymode
fpath = self.fpath(fname)
plot.save(fpath)
self.compare_with_ref(fname, 0.01, show=False)
@unittest.skip('won\'t-fix-this')
def test_simple_density(self):
x = num.linspace(0., 2.*math.pi, 50)
y = num.linspace(0., 2.*math.pi, 50)
x2 = num.tile(x, y.size)
y2 = num.repeat(y, x.size)
z2 = num.sin(x2) * num.sin(y2)
for version in gmtpy.all_installed_gmt_versions():
for method in ['surface', 'triangulate', 'fillcontour']:
plot = gmtpy.Simple(gmtversion=version, with_palette=True)
plot.density_plot((x2, y2, z2), method=method)
fname = 'gmtpy_test_simple_density_%s.png' % method
fpath = self.fpath(fname)
plot.save(fpath)
self.compare_with_ref(fname, 0.02)
def test_grid_data(self):
x = num.linspace(0., 2.*math.pi, 100)
y = num.linspace(0., 2.*math.pi, 100)
x2 = num.tile(x, y.size)
y2 = num.repeat(y, x.size)
z2 = num.sin(x2) * num.sin(y2)
xf, yf, zf = gmtpy.griddata_auto(x2, y2, z2)
assert (xf.size, yf.size, zf.size) == (100, 100, 100*100)
x3, y3, z3 = gmtpy.tabledata(xf, yf, zf)
assert_allclose(x3, x2, atol=1e-7)
assert_allclose(y3, y2, atol=1e-7)
assert_allclose(z3, z2, atol=1e-7)
xf2, yf2, zf2 = gmtpy.doublegrid(xf, yf, zf)
assert (xf2.size, yf2.size, zf2.size) == (199, 199, 199*199)
fn = self.fpath('grid.nc')
for naming in ['xy', 'lonlat']:
gmtpy.savegrd(xf, yf, zf, fn, naming=naming, title='mygrid')
xf3, yf3, zf3 = gmtpy.loadgrd(fn)
assert_allclose(xf3, xf)
assert_allclose(yf3, yf)
assert_allclose(zf3, zf)
def test_text_box(self):
for version in gmtpy.all_installed_gmt_versions():
s = gmtpy.text_box('Hello', gmtversion=version)
assert_allclose(s, (25.8, 9.), rtol=0.1)
s = gmtpy.text_box(
'Abc def ghi jkl mno pqr stu vwx yz',
gmtversion=version)
assert_allclose(s, (179.9, 12.3), rtol=0.01)
def test_override_args(self):
x = num.array([0, 0.5, 1, 0])
y = num.array([0, 1, 0, 0])
width = 300
height = 100
config_papersize = (width, height)
for version in gmtpy.all_installed_gmt_versions():
gmt = gmtpy.GMT(
version=version,
config_papersize=config_papersize)
for i, cutoff in enumerate([30, 90]):
gmt.psxy(
in_columns=(i*2+x, y),
W='10p,red',
J='X%gp/%gp' % (width, height),
X=0,
Y=0,
R=(-1, 4, -1, 2),
config={
'PS_MITER_LIMIT': '%i' % cutoff})
fname = 'gmtpy_test_override.png'
fpath = self.fpath(fname)
gmt.save(fpath)
self.compare_with_ref(fname, 0.001, show=False)
if __name__ == "__main__":
plot = True
util.setup_logging('test_gmtpy', 'warning')
unittest.main()
| gpl-3.0 |
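# Standalone sketch (numpy only, not part of pyrocko) of the image-regression
# check performed by compare_with_ref above: compare the mean absolute pixel
# difference of two renderings against a tolerance.
import numpy as num

rng = num.random.RandomState(0)
img = rng.random_sample((32, 32, 3))
img_ref = num.clip(img + rng.normal(scale=1e-3, size=img.shape), 0., 1.)

merr = num.mean(num.abs(img - img_ref))
tolerance = 0.01
assert merr <= tolerance, 'mean abs difference %g exceeds %g' % (merr, tolerance)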
gpfreitas/bokeh | bokeh/charts/models.py | 2 | 5912 | from __future__ import absolute_import
from bokeh.models.renderers import GlyphRenderer
from bokeh.models.sources import ColumnDataSource
from bokeh.properties import (HasProps, String, Either, Float, Color, Instance, List,
Any)
from .properties import ColumnLabel, Column
class CompositeGlyph(HasProps):
"""Represents a subset of data.
    A collection of heterogeneous or homogeneous glyph
renderers which represent a subset of data. The
purpose of the composite glyph is to abstract
away the details of constructing glyphs, based on
the details of a subset of data, from the grouping
    operations that a generalized builder must implement.
In general, the Builder operates at the full column
oriented data source level, segmenting and assigning
attributes from a large selection, while the composite glyphs
    will typically be passed array-like structures with
    one or more singular attributes to apply.
Another way to explain the concept is that the Builder
operates as the groupby, as in pandas, while the
CompositeGlyph operates as the function used in the apply.
What is the responsibility of the Composite Glyph?
- Produce GlyphRenderers
- Apply any aggregations
- Tag the GlyphRenderers with the group label
- Apply transforms due to chart operations
- Note: Operations require implementation of special methods
"""
# composite glyph inputs
label = String('None', help='Identifies the subset of data.')
values = Either(Column(Float), Column(String), help="""Array-like values,
which are used as the input to the composite glyph.""")
# derived from inputs
source = Instance(ColumnDataSource, help="""The data source used for the contained
glyph renderers. Simple glyphs part of the composite glyph might not use the
column data source.""")
renderers = List(Instance(GlyphRenderer))
operations = List(Any, help="""A list of chart operations that can be applied to
manipulate their visual depiction.""")
color = Color(default='gray', help="""A high level color. Some glyphs will
implement more specific color attributes for parts or specific glyphs.""")
line_color = Color(default='black', help="""A default outline color for contained
glyphs.""")
fill_alpha = Float(default=0.8)
left_buffer = Float(default=0.0)
right_buffer = Float(default=0.0)
top_buffer = Float(default=0.0)
bottom_buffer = Float(default=0.0)
def __init__(self, **kwargs):
label = kwargs.pop('label', None)
if label is not None:
if not isinstance(label, str):
label = str(label)
kwargs['label'] = label
super(CompositeGlyph, self).__init__(**kwargs)
def setup(self):
"""Build renderers and data source and set sources on renderers."""
self.renderers = [renderer for renderer in self.build_renderers()]
if self.renderers is not None:
self.refresh()
def refresh(self):
"""Update the GlyphRenderers.
.. note:
this method would be called after data is added.
"""
if self.renderers is not None:
self.source = self.build_source()
self._set_sources()
def build_renderers(self):
raise NotImplementedError('You must return list of renderers.')
def build_source(self):
raise NotImplementedError('You must return ColumnDataSource.')
def _set_sources(self):
"""Store reference to source in each GlyphRenderer.
.. note::
if the glyphs that are part of the composite glyph differ, you may have to
override this method and handle the sources manually.
"""
for renderer in self.renderers:
renderer.data_source = self.source
def __stack__(self, glyphs):
"""A special method the `stack` function applies to composite glyphs."""
pass
def __jitter__(self, glyphs):
"""A special method the `jitter` function applies to composite glyphs."""
pass
def __dodge__(self, glyphs):
"""A special method the `dodge` function applies to composite glyphs."""
pass
def __overlay__(self, glyphs):
"""A special method the `overlay` function applies to composite glyphs."""
pass
def apply_operations(self):
pass
class CollisionModifier(HasProps):
"""Models an special type of operation that alters how glyphs interact.
Used to handle the manipulation of glyphs for operations, such as stacking. The
list of `CompositeGlyph`s can either be input into the `CollisionModifier` as
keyword args, or added individually with the `add_glyph` method.
"""
comp_glyphs = List(Instance(CompositeGlyph), help="""A list of composite glyphs,
to apply the modification to.""")
name = String(help="""The name of the collision modifier.""")
method_name = String(help="""The name of the method that will be utilized on
the composite glyphs. This method must exist on all `comp_glyphs`.""")
columns = Either(ColumnLabel, List(ColumnLabel), help="""Some collision modifiers
might require column labels to apply the operation in relation to.""")
def add_glyph(self, comp_glyph):
self.comp_glyphs.append(comp_glyph)
def apply(self, renderers=None):
if len(self.comp_glyphs) == 0:
self.comp_glyphs = renderers
if len(self.comp_glyphs) > 0:
# the first renderer's operation method is applied to the rest
getattr(self.comp_glyphs[0], self.method_name)(self.comp_glyphs)
else:
raise AttributeError('%s must be applied to available renderers, none found.' %
self.__class__.__name__)
| bsd-3-clause |
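# A rough, dependency-light sketch of the groupby/apply analogy in the
# CompositeGlyph docstring above; the names here are hypothetical and are not
# part of the bokeh API.
import pandas as pd

df = pd.DataFrame({'group': ['a', 'a', 'b', 'b'], 'value': [1, 2, 3, 4]})

def composite_glyph_like(subset):
    # Stand-in for what a composite glyph does with one subset: aggregate it
    # and return a description that could be turned into renderers.
    return pd.Series({'label': subset.name, 'height': subset['value'].sum()})

# Stand-in for the Builder: split the column-oriented source into subsets
# (the "groupby") and hand each subset to the composite glyph (the "apply").
specs = df.groupby('group').apply(composite_glyph_like)
print(specs)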
airbnb/superset | tests/model_tests.py | 1 | 14095 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import textwrap
import unittest
import pandas
from sqlalchemy.engine.url import make_url
import tests.test_app
from superset import app, db as metadata_db
from superset.models.core import Database
from superset.models.slice import Slice
from superset.utils.core import get_example_database, QueryStatus
from .base_tests import SupersetTestCase
class TestDatabaseModel(SupersetTestCase):
@unittest.skipUnless(
SupersetTestCase.is_module_installed("requests"), "requests not installed"
)
def test_database_schema_presto(self):
sqlalchemy_uri = "presto://presto.airbnb.io:8080/hive/default"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("hive/default", db)
db = make_url(model.get_sqla_engine(schema="core_db").url).database
self.assertEqual("hive/core_db", db)
sqlalchemy_uri = "presto://presto.airbnb.io:8080/hive"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("hive", db)
db = make_url(model.get_sqla_engine(schema="core_db").url).database
self.assertEqual("hive/core_db", db)
def test_database_schema_postgres(self):
sqlalchemy_uri = "postgresql+psycopg2://postgres.airbnb.io:5439/prod"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("prod", db)
db = make_url(model.get_sqla_engine(schema="foo").url).database
self.assertEqual("prod", db)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("thrift"), "thrift not installed"
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pyhive"), "pyhive not installed"
)
def test_database_schema_hive(self):
sqlalchemy_uri = "hive://[email protected]:10000/default?auth=NOSASL"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("default", db)
db = make_url(model.get_sqla_engine(schema="core_db").url).database
self.assertEqual("core_db", db)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("MySQLdb"), "mysqlclient not installed"
)
def test_database_schema_mysql(self):
sqlalchemy_uri = "mysql://root@localhost/superset"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("superset", db)
db = make_url(model.get_sqla_engine(schema="staging").url).database
self.assertEqual("staging", db)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("MySQLdb"), "mysqlclient not installed"
)
def test_database_impersonate_user(self):
uri = "mysql://root@localhost"
example_user = "giuseppe"
model = Database(database_name="test_database", sqlalchemy_uri=uri)
model.impersonate_user = True
user_name = make_url(model.get_sqla_engine(user_name=example_user).url).username
self.assertEqual(example_user, user_name)
model.impersonate_user = False
user_name = make_url(model.get_sqla_engine(user_name=example_user).url).username
self.assertNotEqual(example_user, user_name)
def test_select_star(self):
db = get_example_database()
table_name = "energy_usage"
sql = db.select_star(table_name, show_cols=False, latest_partition=False)
quote = db.inspector.engine.dialect.identifier_preparer.quote_identifier
expected = (
textwrap.dedent(
f"""\
SELECT *
FROM {quote(table_name)}
LIMIT 100"""
)
if db.backend in {"presto", "hive"}
else textwrap.dedent(
f"""\
SELECT *
FROM {table_name}
LIMIT 100"""
)
)
assert expected in sql
sql = db.select_star(table_name, show_cols=True, latest_partition=False)
# TODO(bkyryliuk): unify sql generation
if db.backend == "presto":
assert (
textwrap.dedent(
"""\
SELECT "source" AS "source",
"target" AS "target",
"value" AS "value"
FROM "energy_usage"
LIMIT 100"""
)
== sql
)
elif db.backend == "hive":
assert (
textwrap.dedent(
"""\
SELECT `source`,
`target`,
`value`
FROM `energy_usage`
LIMIT 100"""
)
== sql
)
else:
assert (
textwrap.dedent(
"""\
SELECT source,
target,
value
FROM energy_usage
LIMIT 100"""
)
in sql
)
def test_select_star_fully_qualified_names(self):
db = get_example_database()
schema = "schema.name"
table_name = "table/name"
sql = db.select_star(
table_name, schema=schema, show_cols=False, latest_partition=False
)
fully_qualified_names = {
"sqlite": '"schema.name"."table/name"',
"mysql": "`schema.name`.`table/name`",
"postgres": '"schema.name"."table/name"',
}
fully_qualified_name = fully_qualified_names.get(db.db_engine_spec.engine)
if fully_qualified_name:
expected = textwrap.dedent(
f"""\
SELECT *
FROM {fully_qualified_name}
LIMIT 100"""
)
assert sql.startswith(expected)
def test_single_statement(self):
main_db = get_example_database()
if main_db.backend == "mysql":
df = main_db.get_df("SELECT 1", None)
self.assertEqual(df.iat[0, 0], 1)
df = main_db.get_df("SELECT 1;", None)
self.assertEqual(df.iat[0, 0], 1)
def test_multi_statement(self):
main_db = get_example_database()
if main_db.backend == "mysql":
df = main_db.get_df("USE superset; SELECT 1", None)
self.assertEqual(df.iat[0, 0], 1)
df = main_db.get_df("USE superset; SELECT ';';", None)
self.assertEqual(df.iat[0, 0], ";")
class TestSqlaTableModel(SupersetTestCase):
def test_get_timestamp_expression(self):
tbl = self.get_table_by_name("birth_names")
ds_col = tbl.get_column("ds")
sqla_literal = ds_col.get_timestamp_expression(None)
self.assertEqual(str(sqla_literal.compile()), "ds")
sqla_literal = ds_col.get_timestamp_expression("P1D")
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "DATE(ds)")
prev_ds_expr = ds_col.expression
ds_col.expression = "DATE_ADD(ds, 1)"
sqla_literal = ds_col.get_timestamp_expression("P1D")
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "DATE(DATE_ADD(ds, 1))")
ds_col.expression = prev_ds_expr
def test_get_timestamp_expression_epoch(self):
tbl = self.get_table_by_name("birth_names")
ds_col = tbl.get_column("ds")
ds_col.expression = None
ds_col.python_date_format = "epoch_s"
sqla_literal = ds_col.get_timestamp_expression(None)
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "from_unixtime(ds)")
ds_col.python_date_format = "epoch_s"
sqla_literal = ds_col.get_timestamp_expression("P1D")
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "DATE(from_unixtime(ds))")
prev_ds_expr = ds_col.expression
ds_col.expression = "DATE_ADD(ds, 1)"
sqla_literal = ds_col.get_timestamp_expression("P1D")
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "DATE(from_unixtime(DATE_ADD(ds, 1)))")
ds_col.expression = prev_ds_expr
def query_with_expr_helper(self, is_timeseries, inner_join=True):
tbl = self.get_table_by_name("birth_names")
ds_col = tbl.get_column("ds")
ds_col.expression = None
ds_col.python_date_format = None
spec = self.get_database_by_id(tbl.database_id).db_engine_spec
if not spec.allows_joins and inner_join:
            # if the db does not support inner joins, we cannot force it, so skip this case
return None
old_inner_join = spec.allows_joins
spec.allows_joins = inner_join
arbitrary_gby = "state || gender || '_test'"
arbitrary_metric = dict(
label="arbitrary", expressionType="SQL", sqlExpression="SUM(sum_boys)"
)
query_obj = dict(
groupby=[arbitrary_gby, "name"],
metrics=[arbitrary_metric],
filter=[],
is_timeseries=is_timeseries,
columns=[],
granularity="ds",
from_dttm=None,
to_dttm=None,
extras=dict(time_grain_sqla="P1Y"),
)
qr = tbl.query(query_obj)
self.assertEqual(qr.status, QueryStatus.SUCCESS)
sql = qr.query
self.assertIn(arbitrary_gby, sql)
self.assertIn("name", sql)
if inner_join and is_timeseries:
self.assertIn("JOIN", sql.upper())
else:
self.assertNotIn("JOIN", sql.upper())
spec.allows_joins = old_inner_join
self.assertFalse(qr.df.empty)
return qr.df
def test_query_with_expr_groupby_timeseries(self):
if get_example_database().backend == "presto":
# TODO(bkyryliuk): make it work for presto.
return
def cannonicalize_df(df):
ret = df.sort_values(by=list(df.columns.values), inplace=False)
ret.reset_index(inplace=True, drop=True)
return ret
df1 = self.query_with_expr_helper(is_timeseries=True, inner_join=True)
name_list1 = cannonicalize_df(df1).name.values.tolist()
df2 = self.query_with_expr_helper(is_timeseries=True, inner_join=False)
        name_list2 = cannonicalize_df(df2).name.values.tolist()
self.assertFalse(df2.empty)
expected_namelist = [
"Anthony",
"Brian",
"Christopher",
"Daniel",
"David",
"Eric",
"James",
"Jeffrey",
"John",
"Joseph",
"Kenneth",
"Kevin",
"Mark",
"Michael",
"Paul",
]
assert name_list2 == expected_namelist
assert name_list1 == expected_namelist
def test_query_with_expr_groupby(self):
self.query_with_expr_helper(is_timeseries=False)
def test_sql_mutator(self):
tbl = self.get_table_by_name("birth_names")
query_obj = dict(
groupby=[],
metrics=[],
filter=[],
is_timeseries=False,
columns=["name"],
granularity=None,
from_dttm=None,
to_dttm=None,
extras={},
)
sql = tbl.get_query_str(query_obj)
self.assertNotIn("-- COMMENT", sql)
def mutator(*args):
return "-- COMMENT\n" + args[0]
app.config["SQL_QUERY_MUTATOR"] = mutator
sql = tbl.get_query_str(query_obj)
self.assertIn("-- COMMENT", sql)
app.config["SQL_QUERY_MUTATOR"] = None
def test_query_with_non_existent_metrics(self):
tbl = self.get_table_by_name("birth_names")
query_obj = dict(
groupby=[],
metrics=["invalid"],
filter=[],
is_timeseries=False,
columns=["name"],
granularity=None,
from_dttm=None,
to_dttm=None,
extras={},
)
with self.assertRaises(Exception) as context:
tbl.get_query_str(query_obj)
self.assertTrue("Metric 'invalid' does not exist", context.exception)
def test_data_for_slices(self):
tbl = self.get_table_by_name("birth_names")
slc = (
metadata_db.session.query(Slice)
.filter_by(datasource_id=tbl.id, datasource_type=tbl.type)
.first()
)
data_for_slices = tbl.data_for_slices([slc])
self.assertEqual(len(data_for_slices["columns"]), 0)
self.assertEqual(len(data_for_slices["metrics"]), 1)
self.assertEqual(len(data_for_slices["verbose_map"].keys()), 2)
| apache-2.0 |
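# Minimal sketch of the per-dialect identifier quoting that
# test_select_star_fully_qualified_names relies on. Assumption: SQLAlchemy's
# mysql/postgresql/sqlite dialects can be instantiated without their DBAPI
# drivers; quote_identifier is the same attribute the test reaches via
# db.inspector.engine.dialect.identifier_preparer.
from sqlalchemy.dialects import mysql, postgresql, sqlite

for label, dialect in (("mysql", mysql.dialect()),
                       ("postgres", postgresql.dialect()),
                       ("sqlite", sqlite.dialect())):
    quote = dialect.identifier_preparer.quote_identifier
    print(label, "%s.%s" % (quote("schema.name"), quote("table/name")))
# Expected: backticks for mysql, double quotes for postgres and sqlite,
# matching the fully_qualified_names mapping in the test above.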
evanbiederstedt/RRBSfun | trees/chrom_scripts/cll_chr21.py | 1 | 8247 | import glob
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', 50)  # show up to 50 columns when printing
import os
os.chdir("/gpfs/commons/home/biederstedte-934/evan_projects/correct_phylo_files")
cw154 = glob.glob("binary_position_RRBS_cw154*")
trito = glob.glob("binary_position_RRBS_trito_pool*")
print(len(cw154))
print(len(trito))
totalfiles = cw154 + trito
print(len(totalfiles))
df_list = []
for file in totalfiles:
df = pd.read_csv(file)
df = df.drop("Unnamed: 0", axis=1)
df["chromosome"] = df["position"].map(lambda x: str(x)[:5])
df = df[df["chromosome"] == "chr21"]
df = df.drop("chromosome", axis=1)
df_list.append(df)
print(len(df_list))
total_matrix = pd.concat([df.set_index("position") for df in df_list], axis=1).reset_index().astype(object)
total_matrix = total_matrix.drop("index", axis=1)
len(total_matrix.columns)
total_matrix.columns = ['RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACAACC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACCGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACGTGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ACTCAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.AGGATG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATAGCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.ATCGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CAAGAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CATGAC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CCTTCG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CGGTAG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.CTCAGC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GACACG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCATTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GCTGCC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GGCATC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.GTGAGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TAGCGG',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TATCTC',
'RRBS_cw154_CutSmart_proteinase_K_TAGGCATG.TCTCTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACAACC',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACCGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACGTGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ACTCAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.AGGATG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATAGCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.ATCGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CATGAC',
'RRBS_cw154_Tris_protease_CTCTCTAC.CCTTCG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CGGTAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTATTG',
'RRBS_cw154_Tris_protease_CTCTCTAC.CTCAGC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GACACG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCATTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GCTGCC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GGCATC',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTGAGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.GTTGAG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TAGCGG',
'RRBS_cw154_Tris_protease_CTCTCTAC.TATCTC',
'RRBS_cw154_Tris_protease_CTCTCTAC.TCTCTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACAACC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACCGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACGTGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ACTCAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.AGGATG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATAGCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.ATCGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CATGAC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CCTTCG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CGGTAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTATTG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.CTCAGC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GACACG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCATTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GCTGCC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GGCATC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTGAGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.GTTGAG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TAGCGG',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TATCTC',
'RRBS_cw154_Tris_protease_GR_CAGAGAGG.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.ACAACC',
'RRBS_trito_pool_1_TAAGGCGA.ACGTGG',
'RRBS_trito_pool_1_TAAGGCGA.ACTCAC',
'RRBS_trito_pool_1_TAAGGCGA.ATAGCG',
'RRBS_trito_pool_1_TAAGGCGA.ATCGAC',
'RRBS_trito_pool_1_TAAGGCGA.CAAGAG',
'RRBS_trito_pool_1_TAAGGCGA.CATGAC',
'RRBS_trito_pool_1_TAAGGCGA.CCTTCG',
'RRBS_trito_pool_1_TAAGGCGA.CGGTAG',
'RRBS_trito_pool_1_TAAGGCGA.CTATTG',
'RRBS_trito_pool_1_TAAGGCGA.GACACG',
'RRBS_trito_pool_1_TAAGGCGA.GCATTC',
'RRBS_trito_pool_1_TAAGGCGA.GCTGCC',
'RRBS_trito_pool_1_TAAGGCGA.GGCATC',
'RRBS_trito_pool_1_TAAGGCGA.GTGAGG',
'RRBS_trito_pool_1_TAAGGCGA.GTTGAG',
'RRBS_trito_pool_1_TAAGGCGA.TAGCGG',
'RRBS_trito_pool_1_TAAGGCGA.TATCTC',
'RRBS_trito_pool_1_TAAGGCGA.TCTCTG',
'RRBS_trito_pool_1_TAAGGCGA.TGACAG',
'RRBS_trito_pool_1_TAAGGCGA.TGCTGC',
'RRBS_trito_pool_2_CGTACTAG.ACAACC',
'RRBS_trito_pool_2_CGTACTAG.ACGTGG',
'RRBS_trito_pool_2_CGTACTAG.ACTCAC',
'RRBS_trito_pool_2_CGTACTAG.AGGATG',
'RRBS_trito_pool_2_CGTACTAG.ATAGCG',
'RRBS_trito_pool_2_CGTACTAG.ATCGAC',
'RRBS_trito_pool_2_CGTACTAG.CAAGAG',
'RRBS_trito_pool_2_CGTACTAG.CATGAC',
'RRBS_trito_pool_2_CGTACTAG.CCTTCG',
'RRBS_trito_pool_2_CGTACTAG.CGGTAG',
'RRBS_trito_pool_2_CGTACTAG.CTATTG',
'RRBS_trito_pool_2_CGTACTAG.GACACG',
'RRBS_trito_pool_2_CGTACTAG.GCATTC',
'RRBS_trito_pool_2_CGTACTAG.GCTGCC',
'RRBS_trito_pool_2_CGTACTAG.GGCATC',
'RRBS_trito_pool_2_CGTACTAG.GTGAGG',
'RRBS_trito_pool_2_CGTACTAG.GTTGAG',
'RRBS_trito_pool_2_CGTACTAG.TAGCGG',
'RRBS_trito_pool_2_CGTACTAG.TATCTC',
'RRBS_trito_pool_2_CGTACTAG.TCTCTG',
'RRBS_trito_pool_2_CGTACTAG.TGACAG']
print(total_matrix.shape)
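# The next three lines collapse the per-position binary matrix into one
# character string per sample: missing calls become '?', each column is joined
# top-to-bottom into a string, and the sample name is prefixed before writing
# the PHYLIP-style .phy file below.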
total_matrix = total_matrix.applymap(lambda x: int(x) if pd.notnull(x) else str("?"))
total_matrix = total_matrix.astype(str).apply(''.join)
tott = pd.Series(total_matrix.index.astype(str).str.cat(total_matrix.astype(str),' '))
tott.to_csv("cll_chr21.phy", header=None, index=None)
print(tott.shape)
| mit |
Berreman4x4/Berreman4x4 | examples/FrustratedTIR-thickness.py | 1 | 4230 | #!/usr/bin/python
# encoding: utf-8
# Berreman4x4 example
# Author: O. Castany, C. Molinaro
# Frustrated Total Internal Reflection
# Glass1 / Air / Glass2
import numpy, Berreman4x4
from Berreman4x4 import c, pi
from numpy import exp, cos, arcsin, real, sqrt
import matplotlib.pyplot as pyplot
print("\n*** Glass1 / Air / Glass2 ***\n")
############################################################################
# Structure definition
# Refractive indices
n_f = 1.5
n_s = 1.0
n_b = 1.7
# Materials:
glass1 = Berreman4x4.IsotropicNonDispersiveMaterial(n_f)
air = Berreman4x4.IsotropicNonDispersiveMaterial(n_s)
glass2 = Berreman4x4.IsotropicNonDispersiveMaterial(n_b)
# Layer and half-spaces:
front = Berreman4x4.IsotropicHalfSpace(glass1)
layer = Berreman4x4.HomogeneousIsotropicLayer(air)
back = Berreman4x4.IsotropicHalfSpace(glass2)
# Structure:
s = Berreman4x4.Structure(front, [layer], back)
# Wavelength and wavenumber:
lbda = 1e-6
k0 = 2*pi/lbda
Phi_i = pi/2 * 0.6 # Incidence angle (higher than the limit angle)
# Air thickness variation range
d = numpy.linspace(0, 1.0e-6)
############################################################################
# Analytical calculation
Kx = n_f*numpy.sin(Phi_i) # Reduced wavenumber
# Incidence angle
Phi_s = arcsin((complex(Kx/n_s)))
Phi_b = arcsin(Kx/n_b)
# Wave vector:
kz_f = n_f*k0*cos(Phi_i)
kz_s = k0*sqrt(complex(n_s**2 - Kx**2))
kz_b = n_b*k0*cos(Phi_b)
############################################################################
# The following paragraph is copied from FrustratedTIR_Angle.py
############################################################################
# Amplitude coefficient polarisation s:
r_sf_s = (kz_f-kz_s)/(kz_s+kz_f)
r_bs_s = (kz_s-kz_b)/(kz_s+kz_b)
t_sf_s = 1+r_sf_s
t_bs_s = 1+r_bs_s
# Amplitude coefficient polarisation p:
r_sf_p = (kz_f*n_s**2-kz_s*n_f**2)/(kz_s*n_f**2+kz_f*n_s**2)
r_bs_p = (kz_s*n_b**2-kz_b*n_s**2)/(kz_s*n_b**2+kz_b*n_s**2)
t_sf_p = cos(Phi_i)*(1-r_sf_p)/cos(Phi_s)
t_bs_p = cos(Phi_s)*(1-r_bs_p)/cos(Phi_b)
# Power coefficients:
R_th_s = (abs((r_sf_s+r_bs_s*exp(2j*kz_s*d)) \
/(1+r_bs_s*r_sf_s*exp(2j*kz_s*d))))**2
t2_th_s = (abs((t_bs_s*t_sf_s*exp(1j*kz_s*d)) \
/(1+r_bs_s*r_sf_s*exp(2j*kz_s*d))))**2
R_th_p = (abs((r_sf_p+r_bs_p*exp(2j*kz_s*d)) \
/(1+r_bs_p*r_sf_p*exp(2j*kz_s*d))))**2
t2_th_p= (abs((t_bs_p*t_sf_p*exp(1j*kz_s*d)) \
/(1+r_bs_p*r_sf_p*exp(2j*kz_s*d))))**2
correction = real(n_b*cos(Phi_b)/(n_f*cos(Phi_i)))
# This is a correction term used in R + T*correction = 1
T_th_s = t2_th_s*correction
T_th_p = t2_th_p*correction
############################################################################
############################################################################
# Calculation with Berreman4x4
Kx = front.get_Kx_from_Phi(Phi_i, k0) # Reduced wavenumber
data = Berreman4x4.DataList()
for dd in d:
layer.setThickness(dd)
data.append(s.evaluate(Kx,k0))
# Extraction of the transmission and reflexion coefficients
R_p = data.get('R_pp')
R_s = data.get('R_ss')
T_p = data.get('T_pp')
T_s = data.get('T_ss')
t2_p = abs(data.get('t_pp'))**2 # Before power correction
t2_s = abs(data.get('t_ss'))**2
############################################################################
# Plotting
fig = pyplot.figure(figsize=(12., 6.))
pyplot.rcParams['axes.prop_cycle'] = pyplot.cycler('color', 'bgrcbg')
ax = fig.add_axes([0.1, 0.1, 0.7, 0.8])
y = numpy.vstack((R_s,R_p,t2_s,t2_p,T_s,T_p)).T
legend1 = ("R_s","R_p","t2_s","t2_p","T_s","T_p")
lines1 = ax.plot(d, y)
y_th = numpy.vstack((R_th_s,R_th_p,t2_th_s,t2_th_p,T_th_s,T_th_p)).T
legend2 = ("R_th_s","R_th_p","t2_th_s","t2_th_p","T_th_s","T_th_p")
lines2 = ax.plot(d, y_th, 'x')
ax.legend(lines1 + lines2, legend1 + legend2,
loc='upper left', bbox_to_anchor=(1.05, 1), borderaxespad=0.)
ax.set_title("FTIR: Glass1 / Air (d) / Glass2, for incidence angle " +
"$\Phi_i$ = {:.0f}$^\circ$".format(Phi_i*180/pi))
ax.set_xlabel(r"Air layer thickness, $d$ (m)")
ax.set_ylabel(r"Reflexion and transmission coefficients $R$, $T$")
fmt = ax.xaxis.get_major_formatter()
fmt.set_powerlimits((-3,3))
pyplot.show()
| gpl-3.0 |
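# Quick numerical check (plain numpy, not part of the example above) that the
# chosen incidence angle really exceeds the limit (critical) angle for total
# internal reflection at the glass1/air interface, arcsin(n_s/n_f).
from numpy import pi, arcsin, degrees

n_f, n_s = 1.5, 1.0
Phi_c = arcsin(n_s / n_f)      # critical angle, about 41.8 degrees
Phi_i = pi / 2 * 0.6           # incidence angle used above, 54 degrees
print(degrees(Phi_c), degrees(Phi_i), Phi_i > Phi_c)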
jereze/scikit-learn | sklearn/decomposition/dict_learning.py | 104 | 44632 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=False)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
                         % algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary' * X
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
# Transposing product to ensure Fortran ordering
gram = np.dot(dictionary, dictionary.T).T
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter)
# This ensure that dimensionality of code is always 2,
        # consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
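# Shape sketch (toy sizes, not from the scikit-learn docs): with a dictionary
# D of shape (n_components, n_features) whose rows are unit-norm and data X of
# shape (n_samples, n_features),
#
#     code = sparse_encode(X, D, algorithm='omp', n_nonzero_coefs=3)
#
# returns code of shape (n_samples, n_components) such that np.dot(code, D)
# approximates X with at most 3 nonzero coefficients per sample.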
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
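# Usage sketch (toy sizes): for X of shape (n_samples, n_features),
#
#     code, dictionary, errors = dict_learning(X, n_components=12, alpha=1.0)
#
# returns code (n_samples, n_components), dictionary (n_components, n_features)
# with unit-norm rows, and the per-iteration cost
# 0.5 * ||X - code * dictionary||_2^2 + alpha * ||code||_1.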
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
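# --- Illustrative sketch (not part of the original module) ------------------
# A hedged example of how the online solver above can be resumed without
# losing its history: the sufficient statistics (A, B) and the iteration
# offset from a first call are fed back into a second call.  `X` is assumed
# to be a (n_samples, n_features) float array; the helper name below is
# hypothetical and only added for illustration.
def _example_resume_dict_learning_online(X, n_components=15, alpha=1.0):
    # First pass: request the inner statistics instead of the sparse code.
    dictionary, (A, B) = dict_learning_online(
        X, n_components=n_components, alpha=alpha, n_iter=50,
        return_code=False, return_inner_stats=True, random_state=0)
    # Second pass: warm-start from the previous dictionary and statistics.
    code, dictionary = dict_learning_online(
        X, n_components=n_components, alpha=alpha, n_iter=50,
        dict_init=dictionary, inner_stats=(A, B), iter_offset=50,
        random_state=0)
    return code, dictionary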
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
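# --- Illustrative sketch (not part of the original module) ------------------
# A hedged, minimal example of what `split_sign` does to a code matrix: each
# feature is replaced by its positive part followed by the magnitude of its
# negative part, doubling the number of features.  Values are made up.
def _example_split_sign():
    code = np.array([[1.5, -2.0, 0.0]])
    split_code = np.hstack([np.maximum(code, 0), -np.minimum(code, 0)])
    # split_code -> array([[1.5, 0., 0., 0., 2., 0.]])
    return split_code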
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Rows are assumed to be
        normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
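# --- Illustrative sketch (not part of the original module) ------------------
# A hedged usage example: encode random samples against a small fixed
# dictionary whose rows are normalized to unit norm, as the class docstring
# assumes.  Shapes and values are arbitrary and only meant for illustration.
def _example_sparse_coder():
    rng = np.random.RandomState(0)
    dictionary = rng.randn(8, 12)                     # (n_components, n_features)
    dictionary /= np.sqrt((dictionary ** 2).sum(axis=1))[:, np.newaxis]
    X = rng.randn(5, 12)                              # (n_samples, n_features)
    coder = SparseCoder(dictionary, transform_algorithm='omp',
                        transform_n_nonzero_coefs=3)
    return coder.transform(X)                         # (n_samples, n_components)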
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
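# --- Illustrative sketch (not part of the original module) ------------------
# A hedged usage example: learn a small dictionary on random data and encode
# the same data with the transform parameters chosen at construction time.
# All sizes and values are arbitrary.
def _example_dictionary_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 16)
    dico = DictionaryLearning(n_components=10, alpha=1.0, max_iter=20,
                              transform_algorithm='lasso_lars',
                              transform_alpha=0.1, random_state=0)
    code = dico.fit(X).transform(X)     # (30, 10) mostly-sparse codes
    return dico.components_, code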
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
            and n_features is the number of features.
iter_offset: integer, optional
The number of iteration on data batches that has been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
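# --- Illustrative sketch (not part of the original module) ------------------
# A hedged example of streaming mini-batches through partial_fit.  The
# estimator keeps `inner_stats_` and `iter_offset_` between calls, so the
# online history is preserved.  `batches` is assumed to be an iterable of
# (n_samples_i, n_features) arrays; the helper name is hypothetical.
def _example_streaming_fit(batches, n_components=10):
    dico = MiniBatchDictionaryLearning(n_components=n_components, alpha=1.0,
                                       n_iter=5, random_state=0)
    for X_batch in batches:
        dico.partial_fit(X_batch)
    return dico.components_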
| bsd-3-clause |
rhuelga/sms-tools | lectures/04-STFT/plots-code/window-size.py | 2 | 1508 | import math
import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DF
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 128
start = int(.81*fs)
x1 = x[start:start+N]
plt.figure(1, figsize=(9.5, 6))
plt.subplot(321)
plt.plot(np.arange(start, (start+N), 1.0)/fs, x1*np.hamming(N), 'b', lw=1.5)
plt.axis([start/fs, (start+N)/fs, min(x1*np.hamming(N)), max(x1*np.hamming(N))])
plt.title('x1, M = 128')
mX, pX = DF.dftAnal(x1, np.hamming(N), N)
plt.subplot(323)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5)
plt.axis([0,fs/2.0,-90,max(mX)])
plt.title('mX1')
plt.subplot(325)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/2.0,min(pX),max(pX)])
plt.title('pX1')
N = 1024
start = int(.81*fs)
x2 = x[start:start+N]
mX, pX = DF.dftAnal(x2, np.hamming(N), N)
plt.subplot(322)
plt.plot(np.arange(start, (start+N), 1.0)/fs, x2*np.hamming(N), 'b', lw=1.5)
plt.axis([start/fs, (start+N)/fs, min(x2), max(x2)])
plt.title('x2, M = 1024')
plt.subplot(324)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5)
plt.axis([0,fs/2.0,-90,max(mX)])
plt.title('mX2')
plt.subplot(326)
plt.plot((fs/2.0)*np.arange(mX.size)/float(mX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/2.0,min(pX),max(pX)])
plt.title('pX2')
plt.tight_layout()
plt.savefig('window-size.png')
plt.show()
| agpl-3.0 |
tomlof/scikit-learn | examples/svm/plot_svm_margin.py | 88 | 2540 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors (margin away from hyperplane in direction
# perpendicular to hyperplane). This is sqrt(1+a^2) away vertically in
# 2-d.
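    # For a line y = a*x + b, a vertical shift of d corresponds to a
    # perpendicular displacement of d / sqrt(1 + a^2); shifting vertically by
    # margin * sqrt(1 + a^2) therefore places each parallel exactly one
    # geometric margin (1 / ||w||) away from the decision line.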
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy - np.sqrt(1 + a ** 2) * margin
yy_up = yy + np.sqrt(1 + a ** 2) * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10, edgecolors='k')
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired,
edgecolors='k')
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
IshankGulati/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
PascalSteger/gravimage | programs/reducedata/grw_com.py | 1 | 5672 | #!/usr/bin/env ipython3
##
# @file
# calculate approximate center of mass, assuming constant stellar mass
# 3D version, see grw_COM for 2D
# (c) GPL v3 2015 ETHZ Pascal S.P. Steger, [email protected]
import numpy as np
import pdb
from pylab import *
ion()
from gi_helper import expDtofloat
#import gi_class_files as gcf
# TODO: find missing modules for both imports below
#import gi_centering as gc
def run(gp):
import gr_params
gpr = gr_params.grParams(gp)
## read input measurements
print('input: ', gpr.fil)
x0,y0,z0,vb0,vz0,Mg0,PM0,comp0=np.genfromtxt(gpr.fil,skiprows=0,unpack=True,\
usecols=(0, 1, 2, 5, 11, 13, 19, 20),\
dtype="d17",\
converters={0:expDtofloat, # x0 in pc \
1:expDtofloat, # y0 in pc \
2:expDtofloat, # z0 in pc \
5:expDtofloat, # vz0 in km/s\
12:expDtofloat, # vb0(LOS due binary), km/s\
13:expDtofloat, # Mg0 in Angstrom\
19:expDtofloat, # PM0 [1]\
20:expDtofloat}) # comp0 1,2,3(background)
# use component 12-1 instead of 6-1 for z velocity, to exclude observational errors
# only use stars which are members of the dwarf: exclude pop3 by construction
pm = (PM0 >= gpr.pmsplit) # exclude foreground contamination, outliers
PM0 = PM0[pm]
comp0 = comp0[pm]
x0 = x0[pm]
y0 = y0[pm]
z0 = z0[pm]
vz0 = vz0[pm]; vb0 = vb0[pm]; Mg0 = Mg0[pm]
pm1 = (comp0 == 1) # will be overwritten below if gp.metalpop
pm2 = (comp0 == 2) # same same
pm3 = (comp0 == 3)
if gp.metalpop:
# drawing of populations based on metallicity
# get parameters from function in pymcmetal.py
import pickle
fi = open('metalsplit.dat', 'rb')
DATA = pickle.load(fi)
fi.close()
p, mu1, sig1, mu2, sig2, M, pm1, pm2 = DATA
# cutting pm_i to a maximum of ntracers particles:
ind = np.arange(len(x0))
np.random.shuffle(ind)
ind = ind[:np.sum(gp.ntracer)]
x0 = x0[ind]; y0 = y0[ind]; z0 = z0[ind]; comp0 = comp0[ind]
vz0 = vz0[ind]; vb0=vb0[ind]; Mg0 = Mg0[ind]
PM0 = PM0[ind]; pm1 = pm1[ind]; pm2 = pm2[ind]; pm3 = pm3[ind];
pm = pm1+pm2+pm3
# get COM with shrinking sphere method
com_x, com_y, com_z = com_shrinkcircle(x0,y0,z0,PM0)
print('COM [pc]: ', com_x, com_y, com_z)
com_vz = np.sum(vz0*PM0)/np.sum(PM0) # [km/s]
print('VOM [km/s]', com_vz)
# from now on, continue to work with 3D data. store to different files
x0 -= com_x; y0 -= com_y; z0 -= com_z # [pc]
vz0 -= com_vz #[km/s]
# but still get the same radii as from 2D method, to get comparison of integration routines right
r0 = np.sqrt(x0*x0+y0*y0+z0*z0) # [pc]
rhalf = np.median(r0) # [pc]
rscale = rhalf # or gpr.r_DM # [pc]
print('rscale = ', rscale, ' pc')
print('max(R) = ', max(r0) ,' pc')
print('last element of R : ',r0[-1],' pc')
print('total number of stars: ',len(r0))
pop = -1
for pmn in [pm, pm1, pm2]:
pmr = (r0<(gp.maxR*rscale)) # [1] based on [pc]
pmn = pmn*pmr # [1]
print("fraction of members = ", 1.0*sum(pmn)/len(pmn))
pop = pop + 1
x = x0[pmn]; y = y0[pmn]; z = z0[pmn]; vz = vz0[pmn]; vb = vb0[pmn]; # [pc], [km/s]
Mg = Mg0[pmn]; comp = comp0[pmn]; PMN = PM0[pmn] # [ang], [1], [1]
m = np.ones(len(pmn))
rscalei = np.median(np.sqrt(x*x+y*y+z*z))
# print("x y z" on first line, to interprete data later on)
crscale = open(gp.files.get_scale_file(pop)+'_3D','w')
print('# rscale in [pc], surfdens_central (=dens0) in [Munit/rscale0^2], and in [Munit/pc^2], and totmass_tracers [Munit], and max(sigma_LOS) in [km/s]', file=crscale)
print(rscalei, file=crscale) # use 3 different half-light radii
crscale.close()
# store recentered positions and velocity
print('output: ',gp.files.get_com_file(pop)+'_3D')
c = open(gp.files.get_com_file(pop)+'_3D','w')
print('# x [rscale],','y [rscale],', 'z [rscale]','vLOS [km/s],','rscale = ',rscalei,' pc', file=c)
for k in range(len(x)):
print(x[k]/rscalei, y[k]/rscalei, z[k]/rscalei, vz[k], file=c) # 3* [pc], [km/s]
c.close()
if gpr.showplots and False:
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#res = (abs(x)<3*rscalei)*(abs(y)<3*rscalei)
#x = x[res]; y = y[res]; z = z[res]
en = len(x)
ax.scatter3D(x[:en], y[:en], z[:en], c=pmn[:en], s=35, \
vmin=0.95, vmax=1.0, lw=0.0, alpha=0.2)
#circ_HL=Circle((0,0), radius=rscalei, fc='None', ec='b', lw=1)
#gca().add_patch(circ_HL)
#circ_DM=Circle((0,0), radius=gpr.r_DM, fc='None', ec='r', lw=1)
#gca().add_patch(circ_DM)
pdb.set_trace()
gpr.show_part_pos(x, y, pmn, rscalei)
if __name__=='__main__':
gpr.showplots = True
import gi_params
gp = gi_params.Params()
run(gp)
| gpl-2.0 |
elijah513/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implement compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/arrays/test_numpy.py | 2 | 5496 | """
Additional tests for PandasArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import pandas as pd
from pandas.arrays import PandasArray
from pandas.core.arrays.numpy_ import PandasDtype
import pandas.util.testing as tm
@pytest.fixture(
params=[
np.array(["a", "b"], dtype=object),
np.array([0, 1], dtype=float),
np.array([0, 1], dtype=int),
np.array([0, 1 + 2j], dtype=complex),
np.array([True, False], dtype=bool),
np.array([0, 1], dtype="datetime64[ns]"),
np.array([0, 1], dtype="timedelta64[ns]"),
]
)
def any_numpy_array(request):
"""
Parametrized fixture for NumPy arrays with different dtypes.
This excludes string and bytes.
"""
return request.param
# ----------------------------------------------------------------------------
# PandasDtype
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", True),
("uint", True),
("float", True),
("complex", True),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_numeric(dtype, expected):
dtype = PandasDtype(dtype)
assert dtype._is_numeric is expected
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", False),
("uint", False),
("float", False),
("complex", False),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_boolean(dtype, expected):
dtype = PandasDtype(dtype)
assert dtype._is_boolean is expected
def test_repr():
dtype = PandasDtype(np.dtype("int64"))
assert repr(dtype) == "PandasDtype('int64')"
def test_constructor_from_string():
result = PandasDtype.construct_from_string("int64")
expected = PandasDtype(np.dtype("int64"))
assert result == expected
# ----------------------------------------------------------------------------
# Construction
def test_constructor_no_coercion():
with pytest.raises(ValueError, match="NumPy array"):
PandasArray([1, 2, 3])
def test_series_constructor_with_copy():
ndarray = np.array([1, 2, 3])
ser = pd.Series(PandasArray(ndarray), copy=True)
assert ser.values is not ndarray
def test_series_constructor_with_astype():
ndarray = np.array([1, 2, 3])
result = pd.Series(PandasArray(ndarray), dtype="float64")
expected = pd.Series([1.0, 2.0, 3.0], dtype="float64")
tm.assert_series_equal(result, expected)
def test_from_sequence_dtype():
arr = np.array([1, 2, 3], dtype="int64")
result = PandasArray._from_sequence(arr, dtype="uint64")
expected = PandasArray(np.array([1, 2, 3], dtype="uint64"))
tm.assert_extension_array_equal(result, expected)
def test_constructor_copy():
arr = np.array([0, 1])
result = PandasArray(arr, copy=True)
assert np.shares_memory(result._ndarray, arr) is False
def test_constructor_with_data(any_numpy_array):
nparr = any_numpy_array
arr = PandasArray(nparr)
assert arr.dtype.numpy_dtype == nparr.dtype
# ----------------------------------------------------------------------------
# Conversion
def test_to_numpy():
arr = PandasArray(np.array([1, 2, 3]))
result = arr.to_numpy()
assert result is arr._ndarray
result = arr.to_numpy(copy=True)
assert result is not arr._ndarray
result = arr.to_numpy(dtype="f8")
expected = np.array([1, 2, 3], dtype="f8")
tm.assert_numpy_array_equal(result, expected)
# ----------------------------------------------------------------------------
# Setitem
def test_setitem_series():
ser = pd.Series([1, 2, 3])
ser.array[0] = 10
expected = pd.Series([10, 2, 3])
tm.assert_series_equal(ser, expected)
def test_setitem(any_numpy_array):
nparr = any_numpy_array
arr = PandasArray(nparr, copy=True)
arr[0] = arr[1]
nparr[0] = nparr[1]
tm.assert_numpy_array_equal(arr.to_numpy(), nparr)
# ----------------------------------------------------------------------------
# Reductions
def test_bad_reduce_raises():
arr = np.array([1, 2, 3], dtype="int64")
arr = PandasArray(arr)
msg = "cannot perform not_a_method with type int"
with pytest.raises(TypeError, match=msg):
arr._reduce(msg)
def test_validate_reduction_keyword_args():
arr = PandasArray(np.array([1, 2, 3]))
msg = "the 'keepdims' parameter is not supported .*all"
with pytest.raises(ValueError, match=msg):
arr.all(keepdims=True)
# ----------------------------------------------------------------------------
# Ops
def test_ufunc():
arr = PandasArray(np.array([-1.0, 0.0, 1.0]))
result = np.abs(arr)
expected = PandasArray(np.abs(arr._ndarray))
tm.assert_extension_array_equal(result, expected)
r1, r2 = np.divmod(arr, np.add(arr, 2))
e1, e2 = np.divmod(arr._ndarray, np.add(arr._ndarray, 2))
e1 = PandasArray(e1)
e2 = PandasArray(e2)
tm.assert_extension_array_equal(r1, e1)
tm.assert_extension_array_equal(r2, e2)
def test_basic_binop():
# Just a basic smoke test. The EA interface tests exercise this
# more thoroughly.
x = PandasArray(np.array([1, 2, 3]))
result = x + x
expected = PandasArray(np.array([2, 4, 6]))
tm.assert_extension_array_equal(result, expected)
| apache-2.0 |
dsquareindia/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 23 | 5460 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/tri/tritools.py | 10 | 12880 | """
Tools for triangular grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib.tri import Triangulation
import numpy as np
class TriAnalyzer(object):
"""
Define basic tools for triangular mesh analysis and improvement.
A TriAnalizer encapsulates a :class:`~matplotlib.tri.Triangulation`
object and provides basic tools for mesh analysis and mesh improvement.
Parameters
----------
triangulation : :class:`~matplotlib.tri.Triangulation` object
The encapsulated triangulation to analyze.
Attributes
----------
`scale_factors`
"""
def __init__(self, triangulation):
if not isinstance(triangulation, Triangulation):
raise ValueError("Expected a Triangulation object")
self._triangulation = triangulation
@property
def scale_factors(self):
"""
Factors to rescale the triangulation into a unit square.
Returns *k*, tuple of 2 scale factors.
Returns
-------
k : tuple of 2 floats (kx, ky)
Tuple of floats that would rescale the triangulation :
``[triangulation.x * kx, triangulation.y * ky]``
fits exactly inside a unit square.
"""
compressed_triangles = self._triangulation.get_masked_triangles()
node_used = (np.bincount(np.ravel(compressed_triangles),
minlength=self._triangulation.x.size) != 0)
x = self._triangulation.x[node_used]
y = self._triangulation.y[node_used]
ux = np.max(x)-np.min(x)
uy = np.max(y)-np.min(y)
return (1./float(ux), 1./float(uy))
def circle_ratios(self, rescale=True):
"""
        Returns a measure of the flatness of the triangulation's triangles.
        The ratio of the incircle radius over the circumcircle radius is a
        widely used indicator of a triangle's flatness.
It is always ``<= 0.5`` and ``== 0.5`` only for equilateral
triangles. Circle ratios below 0.01 denote very flat triangles.
To avoid unduly low values due to a difference of scale between the 2
axis, the triangular mesh can first be rescaled to fit inside a unit
square with :attr:`scale_factors` (Only if *rescale* is True, which is
its default value).
Parameters
----------
rescale : boolean, optional
If True, a rescaling will be internally performed (based on
            :attr:`scale_factors`), so that the (unmasked) triangles fit
exactly inside a unit square mesh. Default is True.
Returns
-------
circle_ratios : masked array
Ratio of the incircle radius over the
circumcircle radius, for each 'rescaled' triangle of the
encapsulated triangulation.
Values corresponding to masked triangles are masked out.
"""
# Coords rescaling
if rescale:
(kx, ky) = self.scale_factors
else:
(kx, ky) = (1.0, 1.0)
pts = np.vstack([self._triangulation.x*kx,
self._triangulation.y*ky]).T
tri_pts = pts[self._triangulation.triangles]
# Computes the 3 side lengths
a = tri_pts[:, 1, :] - tri_pts[:, 0, :]
b = tri_pts[:, 2, :] - tri_pts[:, 1, :]
c = tri_pts[:, 0, :] - tri_pts[:, 2, :]
a = np.sqrt(a[:, 0]**2 + a[:, 1]**2)
b = np.sqrt(b[:, 0]**2 + b[:, 1]**2)
c = np.sqrt(c[:, 0]**2 + c[:, 1]**2)
# circumcircle and incircle radii
s = (a+b+c)*0.5
prod = s*(a+b-s)*(a+c-s)*(b+c-s)
# We have to deal with flat triangles with infinite circum_radius
bool_flat = (prod == 0.)
if np.any(bool_flat):
# Pathologic flow
ntri = tri_pts.shape[0]
circum_radius = np.empty(ntri, dtype=np.float64)
circum_radius[bool_flat] = np.inf
abc = a*b*c
circum_radius[~bool_flat] = abc[~bool_flat] / (
4.0*np.sqrt(prod[~bool_flat]))
else:
# Normal optimized flow
circum_radius = (a*b*c) / (4.0*np.sqrt(prod))
in_radius = (a*b*c) / (4.0*circum_radius*s)
circle_ratio = in_radius/circum_radius
mask = self._triangulation.mask
if mask is None:
return circle_ratio
else:
return np.ma.array(circle_ratio, mask=mask)
def get_flat_tri_mask(self, min_circle_ratio=0.01, rescale=True):
"""
Eliminates excessively flat border triangles from the triangulation.
Returns a mask *new_mask* which allows to clean the encapsulated
triangulation from its border-located flat triangles
(according to their :meth:`circle_ratios`).
This mask is meant to be subsequently applied to the triangulation
using :func:`matplotlib.tri.Triangulation.set_mask` .
*new_mask* is an extension of the initial triangulation mask
in the sense that an initially masked triangle will remain masked.
        The *new_mask* array is computed recursively; at each step flat
triangles are removed only if they share a side with the current
mesh border. Thus no new holes in the triangulated domain will be
created.
Parameters
----------
min_circle_ratio : float, optional
Border triangles with incircle/circumcircle radii ratio r/R will
be removed if r/R < *min_circle_ratio*. Default value: 0.01
rescale : boolean, optional
If True, a rescaling will first be internally performed (based on
            :attr:`scale_factors`), so that the (unmasked) triangles fit
exactly inside a unit square mesh. This rescaling accounts for the
difference of scale which might exist between the 2 axis. Default
(and recommended) value is True.
Returns
-------
new_mask : array-like of booleans
Mask to apply to encapsulated triangulation.
All the initially masked triangles remain masked in the
*new_mask*.
Notes
-----
The rationale behind this function is that a Delaunay
triangulation - of an unstructured set of points - sometimes contains
almost flat triangles at its border, leading to artifacts in plots
(especially for high-resolution contouring).
Masked with computed *new_mask*, the encapsulated
triangulation would contain no more unmasked border triangles
with a circle ratio below *min_circle_ratio*, thus improving the
mesh quality for subsequent plots or interpolation.
Examples
--------
Please refer to the following illustrating example:
.. plot:: mpl_examples/pylab_examples/tricontour_smooth_delaunay.py
"""
# Recursively computes the mask_current_borders, true if a triangle is
# at the border of the mesh OR touching the border through a chain of
# invalid aspect ratio masked_triangles.
ntri = self._triangulation.triangles.shape[0]
mask_bad_ratio = self.circle_ratios(rescale) < min_circle_ratio
current_mask = self._triangulation.mask
if current_mask is None:
current_mask = np.zeros(ntri, dtype=np.bool)
valid_neighbors = np.copy(self._triangulation.neighbors)
renum_neighbors = np.arange(ntri, dtype=np.int32)
nadd = -1
while nadd != 0:
# The active wavefront is the triangles from the border (unmasked
# but with a least 1 neighbor equal to -1
wavefront = ((np.min(valid_neighbors, axis=1) == -1)
& ~current_mask)
# The element from the active wavefront will be masked if their
# circle ratio is bad.
added_mask = np.logical_and(wavefront, mask_bad_ratio)
current_mask = (added_mask | current_mask)
nadd = np.sum(added_mask)
# now we have to update the tables valid_neighbors
valid_neighbors[added_mask, :] = -1
renum_neighbors[added_mask] = -1
valid_neighbors = np.where(valid_neighbors == -1, -1,
renum_neighbors[valid_neighbors])
return np.ma.filled(current_mask, True)
def _get_compressed_triangulation(self, return_tri_renum=False,
return_node_renum=False):
"""
Compress (if masked) the encapsulated triangulation.
Returns minimal-length triangles array (*compressed_triangles*) and
coordinates arrays (*compressed_x*, *compressed_y*) that can still
describe the unmasked triangles of the encapsulated triangulation.
Parameters
----------
return_tri_renum : boolean, optional
Indicates whether a renumbering table to translate the triangle
numbers from the encapsulated triangulation numbering into the
new (compressed) renumbering will be returned.
return_node_renum : boolean, optional
Indicates whether a renumbering table to translate the nodes
numbers from the encapsulated triangulation numbering into the
new (compressed) renumbering will be returned.
Returns
-------
compressed_triangles : array-like
the returned compressed triangulation triangles
compressed_x : array-like
the returned compressed triangulation 1st coordinate
compressed_y : array-like
the returned compressed triangulation 2nd coordinate
tri_renum : array-like of integers
renumbering table to translate the triangle numbers from the
encapsulated triangulation into the new (compressed) renumbering.
-1 for masked triangles (deleted from *compressed_triangles*).
Returned only if *return_tri_renum* is True.
node_renum : array-like of integers
renumbering table to translate the point numbers from the
encapsulated triangulation into the new (compressed) renumbering.
-1 for unused points (i.e. those deleted from *compressed_x* and
*compressed_y*). Returned only if *return_node_renum* is True.
"""
# Valid triangles and renumbering
tri_mask = self._triangulation.mask
compressed_triangles = self._triangulation.get_masked_triangles()
ntri = self._triangulation.triangles.shape[0]
tri_renum = self._total_to_compress_renum(tri_mask, ntri)
# Valid nodes and renumbering
node_mask = (np.bincount(np.ravel(compressed_triangles),
minlength=self._triangulation.x.size) == 0)
compressed_x = self._triangulation.x[~node_mask]
compressed_y = self._triangulation.y[~node_mask]
node_renum = self._total_to_compress_renum(node_mask)
# Now renumbering the valid triangles nodes
compressed_triangles = node_renum[compressed_triangles]
# 4 cases possible for return
if not return_tri_renum:
if not return_node_renum:
return compressed_triangles, compressed_x, compressed_y
else:
return (compressed_triangles, compressed_x, compressed_y,
node_renum)
else:
if not return_node_renum:
return (compressed_triangles, compressed_x, compressed_y,
tri_renum)
else:
return (compressed_triangles, compressed_x, compressed_y,
tri_renum, node_renum)
@staticmethod
def _total_to_compress_renum(mask, n=None):
"""
Parameters
----------
mask : 1d boolean array or None
mask
n : integer
            length of the mask. Useful only if mask can be None.
Returns
-------
renum : integer array
array so that (`valid_array` being a compressed array
based on a `masked_array` with mask *mask*) :
- For all i such as mask[i] = False:
valid_array[renum[i]] = masked_array[i]
- For all i such as mask[i] = True:
renum[i] = -1 (invalid value)
"""
if n is None:
n = np.size(mask)
if mask is not None:
renum = -np.ones(n, dtype=np.int32) # Default num is -1
valid = np.arange(n, dtype=np.int32).compress(~mask, axis=0)
renum[valid] = np.arange(np.size(valid, 0), dtype=np.int32)
return renum
else:
return np.arange(n, dtype=np.int32)
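# --- Illustrative sketch (not part of the original module) ------------------
# A hedged usage example: mask the flattest border triangles of a random
# Delaunay triangulation with the circle-ratio criterion described above
# (equilateral triangles have a ratio of exactly 0.5).  Sizes and the
# threshold are arbitrary.
def _example_mask_flat_border_triangles():
    rng = np.random.RandomState(0)
    x = rng.uniform(-1, 1, 100)
    y = rng.uniform(-1, 1, 100)
    tri = Triangulation(x, y)          # Delaunay triangulation of the points
    analyzer = TriAnalyzer(tri)
    tri.set_mask(analyzer.get_flat_tri_mask(min_circle_ratio=0.02))
    return tri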
| bsd-3-clause |
braghiere/JULESv4.6_clump | examples/harvard/output/plot.py | 1 | 1699 | import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
can_rad_mod_5 = Dataset('harvard.test_leaf_spherical_can_rad_mod_5.output.nc', 'r')
can_rad_mod_5_a_05 = Dataset('harvard.test_leaf_spherical_can_rad_mod_5_a_05.output.nc', 'r')
print(can_rad_mod_5)
time = can_rad_mod_5.variables['time']
print(time)
print time[:]
sensible_heat_5_no = can_rad_mod_5.variables['ftl_gb']
latent_heat_5_no = can_rad_mod_5.variables['latent_heat']
gpp_5_no = can_rad_mod_5.variables['gpp_gb']
gpp_5_no = gpp_5_no[:]*1e8
sensible_heat_5 = can_rad_mod_5_a_05.variables['ftl_gb']
latent_heat_5 = can_rad_mod_5_a_05.variables['latent_heat']
gpp_5 = can_rad_mod_5_a_05.variables['gpp_gb']
gpp_5 = gpp_5[:]*1e8
plt.scatter(time,sensible_heat_5_no,marker='.',color='k',s=30,label='H no structure')
plt.scatter(time,latent_heat_5_no,marker='^',color='k',s=30,label='LE no structure')
plt.scatter(time,sensible_heat_5,marker='.',color='r',s=30,label='H structure')
plt.scatter(time,latent_heat_5,marker='^',color='r',s=30,label='LE structure')
plt.legend()
plt.ylabel('Energy Flux ')
plt.xlabel(time.units)
plt.show()
plt.scatter(sensible_heat_5,sensible_heat_5_no,marker='^',color='k',s=30)
plt.plot([-5,2,3,4,25],[-5,2,3,4,25])
plt.ylabel('No structure')
plt.xlabel('Structure')
plt.legend()
plt.show()
plt.scatter(time,gpp_5_no,marker='^',color='k',s=30,label='no structure')
plt.scatter(time,gpp_5,marker='.',color='r',s=30,label='structure')
plt.legend()
plt.show()
plt.scatter(gpp_5,gpp_5_no,marker='^',color='k',s=30)
plt.plot([-5,2,3,4,25],[-5,2,3,4,25])
plt.ylabel('No structure')
plt.xlabel('Structure')
plt.legend()
plt.show()
can_rad_mod_5.close()
| gpl-2.0 |
kernc/scikit-learn | examples/svm/plot_weighted_samples.py | 95 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
QuLogic/cartopy | lib/cartopy/io/ogc_clients.py | 2 | 34553 | # Copyright Cartopy Contributors
#
# This file is part of Cartopy and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Implements RasterSource classes which can retrieve imagery from web services
such as WMS and WMTS.
The matplotlib interface can make use of RasterSources via the
:meth:`cartopy.mpl.geoaxes.GeoAxes.add_raster` method,
with additional specific methods which make use of this for WMS and WMTS
(:meth:`~cartopy.mpl.geoaxes.GeoAxes.add_wms` and
:meth:`~cartopy.mpl.geoaxes.GeoAxes.add_wmts`). An example of using WMTS in
this way can be found at :ref:`sphx_glr_gallery_web_services_wmts.py`.
"""
import collections
import io
import math
import warnings
import weakref
from xml.etree import ElementTree
from PIL import Image
import numpy as np
import shapely.geometry as sgeom
try:
from owslib.wms import WebMapService
from owslib.wfs import WebFeatureService
import owslib.util
import owslib.wmts
_OWSLIB_AVAILABLE = True
except ImportError:
WebMapService = None
WebFeatureService = None
_OWSLIB_AVAILABLE = False
import cartopy.crs as ccrs
from cartopy.io import LocatedImage, RasterSource
from cartopy.img_transform import warp_array
_OWSLIB_REQUIRED = 'OWSLib is required to use OGC web services.'
# Hardcode some known EPSG codes for now.
# The order given here determines the preferred SRS for WMS retrievals.
_CRS_TO_OGC_SRS = collections.OrderedDict(
[(ccrs.PlateCarree(), 'EPSG:4326'),
(ccrs.Mercator.GOOGLE, 'EPSG:900913'),
(ccrs.OSGB(approx=True), 'EPSG:27700')
])
# Standard pixel size of 0.28 mm as defined by WMTS.
METERS_PER_PIXEL = 0.28e-3
_WGS84_METERS_PER_UNIT = 2 * math.pi * 6378137 / 360
METERS_PER_UNIT = {
'urn:ogc:def:crs:EPSG::27700': 1,
'urn:ogc:def:crs:EPSG::900913': 1,
'urn:ogc:def:crs:OGC:1.3:CRS84': _WGS84_METERS_PER_UNIT,
'urn:ogc:def:crs:EPSG::3031': 1,
'urn:ogc:def:crs:EPSG::3413': 1,
'urn:ogc:def:crs:EPSG::3857': 1,
'urn:ogc:def:crs:EPSG:6.18.3:3857': 1
}
_URN_TO_CRS = collections.OrderedDict(
[('urn:ogc:def:crs:OGC:1.3:CRS84', ccrs.PlateCarree()),
('urn:ogc:def:crs:EPSG::4326', ccrs.PlateCarree()),
('urn:ogc:def:crs:EPSG::900913', ccrs.GOOGLE_MERCATOR),
('urn:ogc:def:crs:EPSG::27700', ccrs.OSGB(approx=True)),
('urn:ogc:def:crs:EPSG::3031', ccrs.Stereographic(
central_latitude=-90,
true_scale_latitude=-71)),
('urn:ogc:def:crs:EPSG::3413', ccrs.Stereographic(
central_longitude=-45,
central_latitude=90,
true_scale_latitude=70)),
('urn:ogc:def:crs:EPSG::3857', ccrs.GOOGLE_MERCATOR),
('urn:ogc:def:crs:EPSG:6.18.3:3857', ccrs.GOOGLE_MERCATOR)
])
# XML namespace definitions
_MAP_SERVER_NS = '{http://mapserver.gis.umn.edu/mapserver}'
_GML_NS = '{http://www.opengis.net/gml}'
def _warped_located_image(image, source_projection, source_extent,
output_projection, output_extent, target_resolution):
"""
Reproject an Image from one source-projection and extent to another.
Returns
-------
LocatedImage
A reprojected LocatedImage, the extent of which is >= the requested
'output_extent'.
"""
if source_projection == output_projection:
extent = output_extent
else:
# Convert Image to numpy array (flipping so that origin
# is 'lower').
# Convert to RGBA to keep the color palette in the regrid process
# if any
img, extent = warp_array(np.asanyarray(image.convert('RGBA'))[::-1],
source_proj=source_projection,
source_extent=source_extent,
target_proj=output_projection,
target_res=np.asarray(target_resolution,
dtype=int),
target_extent=output_extent,
mask_extrapolated=True)
# Convert arrays with masked RGB(A) values to non-masked RGBA
# arrays, setting the alpha channel to zero for masked values.
# This avoids unsightly grey boundaries appearing when the
# extent is limited (i.e. not global).
if np.ma.is_masked(img):
img[:, :, 3] = np.where(np.any(img.mask, axis=2), 0,
img[:, :, 3])
img = img.data
# Convert warped image array back to an Image, undoing the
# earlier flip.
image = Image.fromarray(img[::-1])
return LocatedImage(image, extent)
def _target_extents(extent, requested_projection, available_projection):
"""
Translate the requested extent in the display projection into a list of
extents in the projection available from the service (multiple if it
crosses seams).
The extents are represented as (min_x, max_x, min_y, max_y).
"""
# Start with the requested area.
min_x, max_x, min_y, max_y = extent
target_box = sgeom.box(min_x, min_y, max_x, max_y)
# If the requested area (i.e. target_box) is bigger (or nearly bigger) than
# the entire output requested_projection domain, then we erode the request
# area to avoid re-projection instabilities near the projection boundary.
buffered_target_box = target_box.buffer(requested_projection.threshold,
resolution=1)
fudge_mode = buffered_target_box.contains(requested_projection.domain)
if fudge_mode:
target_box = requested_projection.domain.buffer(
-requested_projection.threshold)
# Translate the requested area into the server projection.
polys = available_projection.project_geometry(target_box,
requested_projection)
# Return the polygons' rectangular bounds as extent tuples.
target_extents = []
for poly in polys:
min_x, min_y, max_x, max_y = poly.bounds
if fudge_mode:
# If we shrunk the request area before, then here we
# need to re-inflate.
radius = min(max_x - min_x, max_y - min_y) / 5.0
radius = min(radius, available_projection.threshold * 15)
poly = poly.buffer(radius, resolution=1)
# Prevent the expanded request going beyond the
# limits of the requested_projection.
poly = available_projection.domain.intersection(poly)
min_x, min_y, max_x, max_y = poly.bounds
target_extents.append((min_x, max_x, min_y, max_y))
return target_extents
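# --- Illustrative sketch (not part of the original module) ------------------
# A hedged example of the helper above: translate a PlateCarree display
# extent (min_x, max_x, min_y, max_y) into the extent(s) that would have to
# be requested from a service that only offers Google Mercator.  The numbers
# are arbitrary.
def _example_target_extents():
    requested = ccrs.PlateCarree()
    available = ccrs.GOOGLE_MERCATOR
    return _target_extents((-20, 40, 30, 70), requested, available)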
class WMSRasterSource(RasterSource):
"""
A WMS imagery retriever which can be added to a map.
Note
----
Requires owslib and Pillow to work.
No caching of retrieved maps is done with this WMSRasterSource.
To reduce load on the WMS server it is encouraged to tile
map requests and subsequently stitch them together to recreate
a single raster, thus allowing for a more aggressive caching scheme,
but this WMSRasterSource does not currently implement WMS tile
fetching.
Whilst not the same service, there is also a WMTSRasterSource which
makes use of tiles and comes with built-in caching for fast repeated
map retrievals.
"""
def __init__(self, service, layers, getmap_extra_kwargs=None):
"""
Parameters
----------
service: string or WebMapService instance
The WebMapService instance, or URL of a WMS service,
            from which to retrieve the image.
layers: string or list of strings
The name(s) of layers to use from the WMS service.
getmap_extra_kwargs: dict, optional
Extra keywords to pass through to the service's getmap method.
If None, a dictionary with ``{'transparent': True}`` will be
defined.
"""
if WebMapService is None:
raise ImportError(_OWSLIB_REQUIRED)
if isinstance(service, str):
service = WebMapService(service)
if isinstance(layers, str):
layers = [layers]
if getmap_extra_kwargs is None:
getmap_extra_kwargs = {'transparent': True}
if len(layers) == 0:
raise ValueError('One or more layers must be defined.')
for layer in layers:
if layer not in service.contents:
raise ValueError('The {!r} layer does not exist in '
'this service.'.format(layer))
#: The OWSLib WebMapService instance.
self.service = service
#: The names of the layers to fetch.
self.layers = layers
#: Extra kwargs passed through to the service's getmap request.
self.getmap_extra_kwargs = getmap_extra_kwargs
def _native_srs(self, projection):
# Return the SRS which corresponds to the given projection when
# known, otherwise return None.
return _CRS_TO_OGC_SRS.get(projection)
def _fallback_proj_and_srs(self):
"""
Return a :class:`cartopy.crs.Projection` and corresponding
SRS string in which the WMS service can supply the requested
layers.
"""
contents = self.service.contents
for proj, srs in _CRS_TO_OGC_SRS.items():
missing = any(srs not in contents[layer].crsOptions for
layer in self.layers)
if not missing:
break
if missing:
raise ValueError('The requested layers are not available in a '
'known SRS.')
return proj, srs
def validate_projection(self, projection):
if self._native_srs(projection) is None:
self._fallback_proj_and_srs()
def _image_and_extent(self, wms_proj, wms_srs, wms_extent, output_proj,
output_extent, target_resolution):
min_x, max_x, min_y, max_y = wms_extent
wms_image = self.service.getmap(layers=self.layers,
srs=wms_srs,
bbox=(min_x, min_y, max_x, max_y),
size=target_resolution,
format='image/png',
**self.getmap_extra_kwargs)
wms_image = Image.open(io.BytesIO(wms_image.read()))
return _warped_located_image(wms_image, wms_proj, wms_extent,
output_proj, output_extent,
target_resolution)
def fetch_raster(self, projection, extent, target_resolution):
target_resolution = [math.ceil(val) for val in target_resolution]
wms_srs = self._native_srs(projection)
if wms_srs is not None:
wms_proj = projection
wms_extents = [extent]
else:
# The SRS for the requested projection is not known, so
# attempt to use the fallback and perform the necessary
# transformations.
wms_proj, wms_srs = self._fallback_proj_and_srs()
# Calculate the bounding box(es) in WMS projection.
wms_extents = _target_extents(extent, projection, wms_proj)
located_images = []
for wms_extent in wms_extents:
located_images.append(self._image_and_extent(wms_proj, wms_srs,
wms_extent,
projection, extent,
target_resolution))
return located_images
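# Usage sketch (editorial addition, not part of the original module). The
# service URL and layer name below are placeholders, not a real endpoint;
# any OGC-compliant WMS with a known layer would be used the same way.
#
#     import cartopy.crs as ccrs
#     source = WMSRasterSource('https://example.com/wms', layers='my_layer')
#     proj = ccrs.PlateCarree()
#     source.validate_projection(proj)
#     located_images = source.fetch_raster(proj,
#                                          extent=(-180, 180, -90, 90),
#                                          target_resolution=(512, 256))
#     # Each element is a LocatedImage(image, extent) pair that can be drawn
#     # onto a GeoAxes.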
class WMTSRasterSource(RasterSource):
"""
A WMTS imagery retriever which can be added to a map.
Uses tile caching for fast repeated map retrievals.
Note
----
Requires owslib and Pillow to work.
"""
_shared_image_cache = weakref.WeakKeyDictionary()
"""
A nested mapping from WMTS, layer name, tile matrix name, tile row
and tile column to the resulting PIL image::
{wmts: {(layer_name, tile_matrix_name): {(row, column): Image}}}
This provides a significant boost when producing multiple maps of the
same projection or with an interactive figure.
"""
def __init__(self, wmts, layer_name, gettile_extra_kwargs=None):
"""
Parameters
----------
wmts
The URL of the WMTS, or an owslib.wmts.WebMapTileService instance.
layer_name
The name of the layer to use.
gettile_extra_kwargs: dict, optional
Extra keywords (e.g. time) to pass through to the
service's gettile method.
"""
if WebMapService is None:
raise ImportError(_OWSLIB_REQUIRED)
if not (hasattr(wmts, 'tilematrixsets') and
hasattr(wmts, 'contents') and
hasattr(wmts, 'gettile')):
wmts = owslib.wmts.WebMapTileService(wmts)
try:
layer = wmts.contents[layer_name]
except KeyError:
raise ValueError('Invalid layer name {!r} for WMTS at {!r}'.format(
layer_name, wmts.url))
#: The OWSLib WebMapTileService instance.
self.wmts = wmts
#: The layer to fetch.
self.layer = layer
#: Extra kwargs passed through to the service's gettile request.
if gettile_extra_kwargs is None:
gettile_extra_kwargs = {}
self.gettile_extra_kwargs = gettile_extra_kwargs
self._matrix_set_name_map = {}
def _matrix_set_name(self, target_projection):
key = id(target_projection)
matrix_set_name = self._matrix_set_name_map.get(key)
if matrix_set_name is None:
if hasattr(self.layer, 'tilematrixsetlinks'):
matrix_set_names = self.layer.tilematrixsetlinks.keys()
else:
matrix_set_names = self.layer.tilematrixsets
def find_projection(match_projection):
result = None
for tile_matrix_set_name in matrix_set_names:
matrix_sets = self.wmts.tilematrixsets
tile_matrix_set = matrix_sets[tile_matrix_set_name]
crs_urn = tile_matrix_set.crs
tms_crs = _URN_TO_CRS.get(crs_urn)
if tms_crs == match_projection:
result = tile_matrix_set_name
break
return result
# First search for a matrix set in the target projection.
matrix_set_name = find_projection(target_projection)
if matrix_set_name is None:
# Search instead for a set in _any_ projection we can use.
for possible_projection in _URN_TO_CRS.values():
# Look for supported projections (in a preferred order).
matrix_set_name = find_projection(possible_projection)
if matrix_set_name is not None:
break
if matrix_set_name is None:
# Fail completely.
available_urns = sorted({
self.wmts.tilematrixsets[name].crs
for name in matrix_set_names})
msg = 'Unable to find tile matrix for projection.'
msg += '\n Projection: ' + str(target_projection)
msg += '\n Available tile CRS URNs:'
msg += '\n ' + '\n '.join(available_urns)
raise ValueError(msg)
self._matrix_set_name_map[key] = matrix_set_name
return matrix_set_name
def validate_projection(self, projection):
self._matrix_set_name(projection)
def fetch_raster(self, projection, extent, target_resolution):
matrix_set_name = self._matrix_set_name(projection)
wmts_projection = _URN_TO_CRS[
self.wmts.tilematrixsets[matrix_set_name].crs]
if wmts_projection == projection:
wmts_extents = [extent]
else:
# Calculate (possibly multiple) extents in the given projection.
wmts_extents = _target_extents(extent, projection, wmts_projection)
# Bump resolution by a small factor, as a weak alternative to
# delivering a minimum projected resolution.
# Generally, the desired area is smaller than the enclosing extent
# in projection space and may have varying scaling, so the ideal
        # solution is a hard problem!
resolution_factor = 1.4
target_resolution = np.array(target_resolution) * resolution_factor
width, height = target_resolution
located_images = []
for wmts_desired_extent in wmts_extents:
# Calculate target resolution for the actual polygon. Note that
# this gives *every* polygon enough pixels for the whole result,
# which is potentially excessive!
min_x, max_x, min_y, max_y = wmts_desired_extent
if wmts_projection == projection:
max_pixel_span = min((max_x - min_x) / width,
(max_y - min_y) / height)
else:
# X/Y orientation is arbitrary, so use a worst-case guess.
max_pixel_span = (min(max_x - min_x, max_y - min_y) /
max(width, height))
# Fetch a suitable image and its actual extent.
wmts_image, wmts_actual_extent = self._wmts_images(
self.wmts, self.layer, matrix_set_name,
extent=wmts_desired_extent,
max_pixel_span=max_pixel_span)
# Return each (image, extent) as a LocatedImage.
if wmts_projection == projection:
located_image = LocatedImage(wmts_image, wmts_actual_extent)
else:
# Reproject the image to the desired projection.
located_image = _warped_located_image(
wmts_image,
wmts_projection, wmts_actual_extent,
output_projection=projection, output_extent=extent,
target_resolution=target_resolution)
located_images.append(located_image)
return located_images
def _choose_matrix(self, tile_matrices, meters_per_unit, max_pixel_span):
# Get the tile matrices in order of increasing resolution.
tile_matrices = sorted(tile_matrices,
key=lambda tm: tm.scaledenominator,
reverse=True)
# Find which tile matrix has the appropriate resolution.
max_scale = max_pixel_span * meters_per_unit / METERS_PER_PIXEL
for tm in tile_matrices:
if tm.scaledenominator <= max_scale:
return tm
return tile_matrices[-1]
def _tile_span(self, tile_matrix, meters_per_unit):
pixel_span = (tile_matrix.scaledenominator *
(METERS_PER_PIXEL / meters_per_unit))
tile_span_x = tile_matrix.tilewidth * pixel_span
tile_span_y = tile_matrix.tileheight * pixel_span
return tile_span_x, tile_span_y
def _select_tiles(self, tile_matrix, tile_matrix_limits,
tile_span_x, tile_span_y, extent):
# Convert the requested extent from CRS coordinates to tile
# indices. See annex H of the WMTS v1.0.0 spec.
# NB. The epsilons get rid of any tiles which only just
# (i.e. one part in a million) intrude into the requested
# extent. Since these wouldn't be visible anyway there's nothing
# to be gained by spending the time downloading them.
min_x, max_x, min_y, max_y = extent
matrix_min_x, matrix_max_y = tile_matrix.topleftcorner
epsilon = 1e-6
min_col = int((min_x - matrix_min_x) / tile_span_x + epsilon)
max_col = int((max_x - matrix_min_x) / tile_span_x - epsilon)
min_row = int((matrix_max_y - max_y) / tile_span_y + epsilon)
max_row = int((matrix_max_y - min_y) / tile_span_y - epsilon)
# Clamp to the limits of the tile matrix.
min_col = max(min_col, 0)
max_col = min(max_col, tile_matrix.matrixwidth - 1)
min_row = max(min_row, 0)
max_row = min(max_row, tile_matrix.matrixheight - 1)
# Clamp to any layer-specific limits on the tile matrix.
if tile_matrix_limits:
min_col = max(min_col, tile_matrix_limits.mintilecol)
max_col = min(max_col, tile_matrix_limits.maxtilecol)
min_row = max(min_row, tile_matrix_limits.mintilerow)
max_row = min(max_row, tile_matrix_limits.maxtilerow)
return min_col, max_col, min_row, max_row
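    # Worked example with illustrative (made-up) numbers: if the matrix top
    # left corner is (0, 90), tile_span_x = tile_span_y = 10 and the
    # requested extent is (12, 37, 62, 85) as (min_x, max_x, min_y, max_y),
    # then min_col = int(1.2) = 1, max_col = int(3.7) = 3,
    # min_row = int(0.5) = 0 and max_row = int(2.8) = 2, i.e. the selected
    # tiles cover x in [10, 40) and y in (60, 90].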
def _wmts_images(self, wmts, layer, matrix_set_name, extent,
max_pixel_span):
"""
        Return a single image (and its extent) stitched together from tiles
        of the specified WMTS layer and matrix set, covering the requested
        extent at an appropriate resolution.
The zoom level (aka. tile matrix) is chosen to give the lowest
possible resolution which still provides the requested quality.
If insufficient resolution is available, the highest available
resolution is used.
Parameters
----------
wmts
The owslib.wmts.WebMapTileService providing the tiles.
layer
The owslib.wmts.ContentMetadata (aka. layer) to draw.
matrix_set_name
The name of the matrix set to use.
extent
Tuple of (left, right, bottom, top) in Axes coordinates.
max_pixel_span
Preferred maximum pixel width or height in Axes coordinates.
"""
# Find which tile matrix has the appropriate resolution.
tile_matrix_set = wmts.tilematrixsets[matrix_set_name]
tile_matrices = tile_matrix_set.tilematrix.values()
meters_per_unit = METERS_PER_UNIT[tile_matrix_set.crs]
tile_matrix = self._choose_matrix(tile_matrices, meters_per_unit,
max_pixel_span)
# Determine which tiles are required to cover the requested extent.
tile_span_x, tile_span_y = self._tile_span(tile_matrix,
meters_per_unit)
tile_matrix_set_links = getattr(layer, 'tilematrixsetlinks', None)
if tile_matrix_set_links is None:
tile_matrix_limits = None
else:
tile_matrix_set_link = tile_matrix_set_links[matrix_set_name]
tile_matrix_limits = tile_matrix_set_link.tilematrixlimits.get(
tile_matrix.identifier)
min_col, max_col, min_row, max_row = self._select_tiles(
tile_matrix, tile_matrix_limits, tile_span_x, tile_span_y, extent)
# Find the relevant section of the image cache.
tile_matrix_id = tile_matrix.identifier
cache_by_wmts = WMTSRasterSource._shared_image_cache
cache_by_layer_matrix = cache_by_wmts.setdefault(wmts, {})
image_cache = cache_by_layer_matrix.setdefault((layer.id,
tile_matrix_id), {})
# To avoid nasty seams between the individual tiles, we
# accumulate the tile images into a single image.
big_img = None
n_rows = 1 + max_row - min_row
n_cols = 1 + max_col - min_col
# Ignore out-of-range errors if the current version of OWSLib
# doesn't provide the regional information.
ignore_out_of_range = tile_matrix_set_links is None
for row in range(min_row, max_row + 1):
for col in range(min_col, max_col + 1):
# Get the tile's Image from the cache if possible.
img_key = (row, col)
img = image_cache.get(img_key)
if img is None:
try:
tile = wmts.gettile(
layer=layer.id,
tilematrixset=matrix_set_name,
tilematrix=str(tile_matrix_id),
row=str(row), column=str(col),
**self.gettile_extra_kwargs)
except owslib.util.ServiceException as exception:
                        # Python 3 exceptions have no ``.message`` attribute,
                        # so match against the string form instead.
                        if ('TileOutOfRange' in str(exception) and
                                ignore_out_of_range):
continue
raise exception
img = Image.open(io.BytesIO(tile.read()))
image_cache[img_key] = img
if big_img is None:
size = (img.size[0] * n_cols, img.size[1] * n_rows)
big_img = Image.new('RGBA', size, (255, 255, 255, 255))
top = (row - min_row) * tile_matrix.tileheight
left = (col - min_col) * tile_matrix.tilewidth
big_img.paste(img, (left, top))
if big_img is None:
img_extent = None
else:
matrix_min_x, matrix_max_y = tile_matrix.topleftcorner
min_img_x = matrix_min_x + tile_span_x * min_col
max_img_y = matrix_max_y - tile_span_y * min_row
img_extent = (min_img_x, min_img_x + n_cols * tile_span_x,
max_img_y - n_rows * tile_span_y, max_img_y)
return big_img, img_extent
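# Usage sketch (editorial addition, not part of the original module). The
# WMTS URL and layer name are placeholders chosen purely for illustration.
#
#     import cartopy.crs as ccrs
#     source = WMTSRasterSource('https://example.com/wmts',
#                               layer_name='some_layer')
#     located_images = source.fetch_raster(ccrs.PlateCarree(),
#                                          extent=(-40, 40, -20, 20),
#                                          target_resolution=(512, 256))
#     # Fetched tiles are kept in WMTSRasterSource._shared_image_cache, so a
#     # second call over the same area is served largely from memory.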
class WFSGeometrySource:
"""Web Feature Service (WFS) retrieval for Cartopy."""
def __init__(self, service, features, getfeature_extra_kwargs=None):
"""
Parameters
----------
service
The URL of a WFS, or an instance of
:class:`owslib.wfs.WebFeatureService`.
features
The typename(s) of the features from the WFS that
will be retrieved and made available as geometries.
getfeature_extra_kwargs: optional
Extra keyword args to pass to the service's `getfeature` call.
Defaults to None
"""
if WebFeatureService is None:
raise ImportError(_OWSLIB_REQUIRED)
if isinstance(service, str):
service = WebFeatureService(service)
if isinstance(features, str):
features = [features]
if getfeature_extra_kwargs is None:
getfeature_extra_kwargs = {}
if not features:
raise ValueError('One or more features must be specified.')
for feature in features:
if feature not in service.contents:
raise ValueError('The {!r} feature does not exist in this '
'service.'.format(feature))
self.service = service
self.features = features
self.getfeature_extra_kwargs = getfeature_extra_kwargs
self._default_urn = None
def default_projection(self):
"""
Return a :class:`cartopy.crs.Projection` in which the WFS
service can supply the requested features.
"""
# Using first element in crsOptions (default).
if self._default_urn is None:
default_urn = {self.service.contents[feature].crsOptions[0] for
feature in self.features}
if len(default_urn) != 1:
                raise ValueError('Failed to find a single common default SRS '
                                 'across all features (typenames).')
else:
default_urn = default_urn.pop()
if str(default_urn) not in _URN_TO_CRS:
raise ValueError('Unknown mapping from SRS/CRS_URN {!r} to '
'cartopy projection.'.format(default_urn))
self._default_urn = default_urn
return _URN_TO_CRS[str(self._default_urn)]
def fetch_geometries(self, projection, extent):
"""
Return any Point, Linestring or LinearRing geometries available
from the WFS that lie within the specified extent.
Parameters
----------
projection: :class:`cartopy.crs.Projection`
The projection in which the extent is specified and in
which the geometries should be returned. Only the default
(native) projection is supported.
extent: four element tuple
(min_x, max_x, min_y, max_y) tuple defining the geographic extent
of the geometries to obtain.
Returns
-------
geoms
A list of Shapely geometries.
"""
if self.default_projection() != projection:
raise ValueError('Geometries are only available in projection '
'{!r}.'.format(self.default_projection()))
min_x, max_x, min_y, max_y = extent
response = self.service.getfeature(typename=self.features,
bbox=(min_x, min_y, max_x, max_y),
**self.getfeature_extra_kwargs)
geoms_by_srs = self._to_shapely_geoms(response)
if not geoms_by_srs:
geoms = []
elif len(geoms_by_srs) > 1:
raise ValueError('Unexpected response from the WFS server. The '
'geometries are in multiple SRSs, when only one '
'was expected.')
else:
srs, geoms = list(geoms_by_srs.items())[0]
# Attempt to verify the SRS associated with the geometries (if any)
# matches the specified projection.
if srs is not None:
if srs in _URN_TO_CRS:
geom_proj = _URN_TO_CRS[srs]
if geom_proj != projection:
raise ValueError('The geometries are not in expected '
'projection. Expected {!r}, got '
'{!r}.'.format(projection, geom_proj))
else:
msg = 'Unable to verify matching projections due ' \
'to incomplete mappings from SRS identifiers ' \
'to cartopy projections. The geometries have ' \
'an SRS of {!r}.'.format(srs)
warnings.warn(msg)
return geoms
def _to_shapely_geoms(self, response):
"""
Convert polygon coordinate strings in WFS response XML to Shapely
geometries.
Parameters
----------
response: (file-like object)
WFS response XML data.
Returns
-------
geoms_by_srs
A dictionary containing geometries, with key-value pairs of
the form {srsname: [geoms]}.
"""
linear_rings_data = []
linestrings_data = []
points_data = []
tree = ElementTree.parse(response)
for node in tree.findall('.//{}msGeometry'.format(_MAP_SERVER_NS)):
# Find LinearRing geometries in our msGeometry node.
find_str = './/{gml}LinearRing'.format(gml=_GML_NS)
if self._node_has_child(node, find_str):
data = self._find_polygon_coords(node, find_str)
linear_rings_data.extend(data)
# Find LineString geometries in our msGeometry node.
find_str = './/{gml}LineString'.format(gml=_GML_NS)
if self._node_has_child(node, find_str):
data = self._find_polygon_coords(node, find_str)
linestrings_data.extend(data)
# Find Point geometries in our msGeometry node.
find_str = './/{gml}Point'.format(gml=_GML_NS)
if self._node_has_child(node, find_str):
data = self._find_polygon_coords(node, find_str)
points_data.extend(data)
geoms_by_srs = {}
for srs, x, y in linear_rings_data:
geoms_by_srs.setdefault(srs, []).append(
sgeom.LinearRing(zip(x, y)))
for srs, x, y in linestrings_data:
geoms_by_srs.setdefault(srs, []).append(
sgeom.LineString(zip(x, y)))
for srs, x, y in points_data:
geoms_by_srs.setdefault(srs, []).append(
sgeom.Point(zip(x, y)))
return geoms_by_srs
def _find_polygon_coords(self, node, find_str):
"""
Return the x, y coordinate values for all the geometries in
        a given `node`.
Parameters
----------
node: :class:`xml.etree.ElementTree.Element`
Node of the parsed XML response.
find_str: string
A search string used to match subelements that contain
the coordinates of interest, for example:
'.//{http://www.opengis.net/gml}LineString'
Returns
-------
data
A list of (srsName, x_vals, y_vals) tuples.
"""
data = []
for polygon in node.findall(find_str):
feature_srs = polygon.attrib.get('srsName')
x, y = [], []
# We can have nodes called `coordinates` or `coord`.
coordinates_find_str = '{}coordinates'.format(_GML_NS)
coords_find_str = '{}coord'.format(_GML_NS)
if self._node_has_child(polygon, coordinates_find_str):
points = polygon.findtext(coordinates_find_str)
coords = points.strip().split(' ')
for coord in coords:
x_val, y_val = coord.split(',')
x.append(float(x_val))
y.append(float(y_val))
elif self._node_has_child(polygon, coords_find_str):
for coord in polygon.findall(coords_find_str):
x.append(float(coord.findtext('{}X'.format(_GML_NS))))
y.append(float(coord.findtext('{}Y'.format(_GML_NS))))
else:
raise ValueError('Unable to find or parse coordinate values '
'from the XML.')
data.append((feature_srs, x, y))
return data
@staticmethod
def _node_has_child(node, find_str):
"""
Return whether `node` contains (at any sub-level), a node with name
equal to `find_str`.
"""
element = node.find(find_str)
return element is not None
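# Usage sketch (editorial addition, not part of the original module). The
# WFS URL and feature typename are placeholders, not a real service.
#
#     source = WFSGeometrySource('https://example.com/wfs',
#                                features='ns:my_features')
#     native_proj = source.default_projection()
#     geoms = source.fetch_geometries(native_proj, extent=(-10, 10, 45, 60))
#     # ``geoms`` is a list of shapely Point / LineString / LinearRing
#     # geometries lying within the requested extent.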
| lgpl-3.0 |
dmytroKarataiev/MachineLearning | learning/algorithms/svm/svm.py | 1 | 1035 | import sys
from learning.algorithms.prep_terrain_data import makeTerrainData
from learning.algorithms.class_vis import prettyPicture, output_image
import matplotlib.pyplot as plt
import copy
import numpy as np
import pylab as pl
features_train, labels_train, features_test, labels_test = makeTerrainData()
########################## SVM #################################
### we handle the import statement and SVC creation for you here
from sklearn.svm import SVC
clf = SVC(kernel="linear", gamma=1.0)  # note: gamma has no effect with a linear kernel
#### now your job is to fit the classifier
#### using the training features/labels, and to
#### make a set of predictions on the test data
clf.fit(features_train, labels_train)
#### store your predictions in a list named pred
pred = clf.predict(features_test)
from sklearn.metrics import accuracy_score
acc = accuracy_score(pred, labels_test)
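# Editorial sketch (not part of the original exercise): on this terrain data
# an rbf kernel with a large C tends to trace a more complex boundary and
# usually scores higher test accuracy than the linear kernel above.
#
#     clf_rbf = SVC(kernel="rbf", C=10000.0)
#     clf_rbf.fit(features_train, labels_train)
#     acc_rbf = accuracy_score(clf_rbf.predict(features_test), labels_test)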
def submitAccuracy():
prettyPicture(clf, features_test, labels_test)
output_image("test.png", "png", open("test.png", "rb").read())
return acc
print(submitAccuracy())
| mit |
ngoix/OCRF | sklearn/svm/classes.py | 1 | 40683 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
from ..utils import timeout, max_time
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to ``coef_``
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
``"ovr"`` trains n_classes one-vs-rest classifiers, while ``"crammer_singer"``
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If ``"crammer_singer"`` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
``[x, self.intercept_scaling]``,
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to ``class_weight[i]*C`` for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
``coef_`` is a readonly property derived from ``raw_coef_`` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation, liblinear, uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
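# Usage sketch (editorial addition, not part of scikit-learn): LinearSVC
# follows the standard estimator API.
#
#     import numpy as np
#     X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
#     y = np.array([1, 1, 2, 2])
#     clf = LinearSVC(C=1.0, random_state=0).fit(X, y)
#     clf.predict([[-0.8, -1.]])   # expected to return class 1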
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard
        epsilon-insensitive loss (standard SVR) while
        'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set ``epsilon=0``.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
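# Usage sketch (editorial addition, not part of scikit-learn): LinearSVR is
# used like any other regressor.
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 3)
#     y = X.dot([1.0, -2.0, 0.5]) + 0.1 * rng.randn(50)
#     reg = LinearSVR(C=1.0, epsilon=0.0, random_state=0).fit(X, y)
#     reg.predict(X[:2])   # approximately recovers the linear relationship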
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
@timeout(max_time)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
@timeout(max_time)
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
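# Usage sketch (editorial addition, not part of scikit-learn): OneClassSVM is
# fitted on "normal" samples only; ``predict`` then returns +1 for inliers
# and -1 for outliers, and ``decision_function`` gives a signed distance to
# the learned boundary.
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X_train = 0.3 * rng.randn(100, 2)
#     clf = OneClassSVM(nu=0.1, kernel='rbf', gamma=0.1).fit(X_train)
#     clf.predict(np.array([[0., 0.], [4., 4.]]))   # likely [+1, -1]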
| bsd-3-clause |
DamCB/tyssue | tests/geometry/test_bulkgeometry.py | 2 | 3672 | import pandas as pd
from tyssue import config
from tyssue.core import Epithelium
from tyssue.generation import three_faces_sheet, extrude
from tyssue.geometry.bulk_geometry import BulkGeometry
def test_bulk_update_vol():
datasets_2d, _ = three_faces_sheet(zaxis=True)
datasets = extrude(datasets_2d, method="translation")
specs = config.geometry.bulk_spec()
eptm = Epithelium("test_volume", datasets, specs, coords=["x", "y", "z"])
BulkGeometry.update_all(eptm)
expected_cell_df = pd.DataFrame.from_dict(
{
"cell": [0, 1, 2],
"x": [0.5, -1.0, 0.5],
"y": [8.660000e-01, -6.167906e-18, -8.6600000e-01],
"z": [-0.5, -0.5, -0.5],
"is_alive": [True, True, True],
"num_faces": [8, 8, 8],
"vol": [2.598, 2.598, 2.598],
}
).set_index("cell")
expected_face_centroids = pd.DataFrame.from_dict(
{
"face": list(range(24)),
"x": [
0.5,
-1.0,
0.5,
0.5,
-1.0,
0.5,
0.5,
1.25,
1.25,
0.5,
-0.25,
-0.25,
-0.25,
-1.0,
-1.75,
-1.75,
-1.0,
-0.25,
-0.25,
-0.25,
0.5,
1.25,
1.25,
0.5,
],
"y": [
0.86599999999999999,
0.0,
-0.86599999999999999,
0.86599999999999999,
0.0,
-0.86599999999999999,
0.0,
0.433,
1.2989999999999999,
1.732,
1.2989999999999999,
0.433,
0.433,
0.86599999999999999,
0.433,
-0.433,
-0.86599999999999999,
-0.433,
-0.433,
-1.2989999999999999,
-1.732,
-1.2989999999999999,
-0.433,
0.0,
],
"z": [
0.0,
0.0,
0.0,
-1.0,
-1.0,
-1.0,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
-0.5,
],
}
).set_index("face")
    ## only the BulkGeometry update methods are exercised here: update_vol, update_centroids
tolerance = 1e-16
## check volumes
assert all((expected_cell_df["vol"] - eptm.cell_df["vol"]) ** 2 < tolerance)
## check centroids
assert all(
(expected_face_centroids - eptm.face_df.loc[:, ["x", "y", "z"]]) ** 2
< tolerance
)
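# Hedged aside (an assumption, not part of the original test): the centroid
# comparison above could also be written with pandas' testing helper, e.g.
#   pd.testing.assert_frame_equal(
#       expected_face_centroids,
#       eptm.face_df.loc[:, ["x", "y", "z"]],
#       check_exact=False)
# provided indexes and dtypes line up; the explicit squared-difference check
# keeps the 1e-16 tolerance visible, which is why it is used here.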
def test_mono_update_perimeters():
datasets_2d, _ = three_faces_sheet(zaxis=True)
datasets = extrude(datasets_2d, method="translation")
specs = config.geometry.bulk_spec()
eptm = Epithelium("test_volume", datasets, specs, coords=["x", "y", "z"])
# This method requires a column 'subdiv' in the edge_df.
# I'm not sure how to build it or what is expected
# to be found in this column by the method ?
# MonoLayerGeometry.update_all(eptm)
| gpl-3.0 |
lukas/scikit-class | videos/emotion-classifier/train.py | 2 | 3107 | # Import layers
from keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from keras.models import Sequential
from keras.callbacks import Callback
import pandas as pd
import numpy as np
import cv2
import keras
import subprocess
import os
import time
import wandb
from wandb.keras import WandbCallback
run = wandb.init()
config = run.config
# set hyperparameters
config.batch_size = 32
config.num_epochs = 5
input_shape = (48, 48, 1)
class Perf(Callback):
"""Performance callback for logging inference time"""
def __init__(self, testX):
self.testX = testX
def on_epoch_end(self, epoch, logs):
start = time.time()
self.model.predict(self.testX)
end = time.time()
self.model.predict(self.testX[:1])
latency = time.time() - end
wandb.log({"avg_inference_time": (end - start) /
len(self.testX) * 1000, "latency": latency * 1000}, commit=False)
def load_fer2013():
"""Load the emotion dataset"""
if not os.path.exists("fer2013"):
print("Downloading the face emotion dataset...")
subprocess.check_output(
"curl -SL https://www.dropbox.com/s/opuvvdv3uligypx/fer2013.tar | tar xz", shell=True)
print("Loading dataset...")
data = pd.read_csv("fer2013/fer2013.csv")
pixels = data['pixels'].tolist()
width, height = 48, 48
faces = []
for pixel_sequence in pixels:
face = np.asarray(pixel_sequence.split(
' '), dtype=np.uint8).reshape(width, height)
face = cv2.resize(face.astype('uint8'), (width, height))
faces.append(face.astype('float32'))
faces = np.asarray(faces)
faces = np.expand_dims(faces, -1)
emotions = pd.get_dummies(data['emotion']).as_matrix()
val_faces = faces[int(len(faces) * 0.8):]
val_emotions = emotions[int(len(faces) * 0.8):]
train_faces = faces[:int(len(faces) * 0.8)]
train_emotions = emotions[:int(len(faces) * 0.8)]
return train_faces, train_emotions, val_faces, val_emotions
# loading dataset
train_faces, train_emotions, val_faces, val_emotions = load_fer2013()
num_samples, num_classes = train_emotions.shape
train_faces /= 255.
val_faces /= 255.
# Define the model here, CHANGEME
model = Sequential()
model.add(Conv2D(32, (3,3), activation="relu", input_shape=input_shape))
model.add(MaxPooling2D())
model.add(Conv2D(64, (3,3), activation="relu"))
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dense(num_classes, activation="softmax"))
model.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
# log the number of total parameters
config.total_params = model.count_params()
model.fit(train_faces, train_emotions, batch_size=config.batch_size,
epochs=config.num_epochs, verbose=1, callbacks=[
Perf(val_faces),
WandbCallback(data_type="image", labels=[
"Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"])
], validation_data=(val_faces, val_emotions))
# save the model
model.save("emotion.h5")
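# Hedged follow-up sketch (not in the original script): reloading the saved
# model for inference. The 48x48x1 input mirrors `input_shape` above; the
# zero-filled face is a placeholder assumption for a real, /255-scaled image.
def _example_reload_and_predict():
    import numpy as np
    from keras.models import load_model
    reloaded = load_model("emotion.h5")
    face = np.zeros((1, 48, 48, 1), dtype="float32")  # substitute a real face crop
    probs = reloaded.predict(face)       # one probability per emotion class
    return probs.argmax(axis=-1)         # index into the label list passed to WandbCallback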
| gpl-2.0 |
simpeg/simpeg | tests/em/tdem/test_TDEM_forward_Analytic.py | 1 | 6067 | from __future__ import division, print_function
import unittest
import numpy as np
from SimPEG import Mesh, Maps, SolverLU
from SimPEG import EM
from scipy.constants import mu_0
import matplotlib.pyplot as plt
from pymatsolver import Pardiso as Solver
def halfSpaceProblemAnaDiff(
meshType, srctype="MagDipole",
sig_half=1e-2, rxOffset=50., bounds=None,
plotIt=False, rxType='bz'
):
if bounds is None:
bounds = [1e-5, 1e-3]
if meshType == 'CYL':
cs, ncx, ncz, npad = 5., 30, 10, 15
hx = [(cs, ncx), (cs, npad, 1.3)]
hz = [(cs, npad, -1.3), (cs, ncz), (cs, npad, 1.3)]
mesh = Mesh.CylMesh([hx, 1, hz], '00C')
elif meshType == 'TENSOR':
cs, nc, npad = 20., 13, 5
hx = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)]
hy = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)]
hz = [(cs, npad, -1.3), (cs, nc), (cs, npad, 1.3)]
mesh = Mesh.TensorMesh([hx, hy, hz], 'CCC')
active = mesh.vectorCCz < 0.
actMap = Maps.InjectActiveCells(mesh, active, np.log(1e-8), nC=mesh.nCz)
mapping = Maps.ExpMap(mesh) * Maps.SurjectVertical1D(mesh) * actMap
rx = getattr(EM.TDEM.Rx, 'Point_{}'.format(rxType[:-1]))(
np.array([[rxOffset, 0., 0.]]), np.logspace(-5, -4, 21), rxType[-1]
)
if srctype == "MagDipole":
src = EM.TDEM.Src.MagDipole(
[rx], waveform=EM.TDEM.Src.StepOffWaveform(),
loc=np.array([0., 0., 0.])
)
elif srctype == "CircularLoop":
src = EM.TDEM.Src.CircularLoop(
[rx], waveform=EM.TDEM.Src.StepOffWaveform(),
loc=np.array([0., 0., 0.]), radius=0.1
)
survey = EM.TDEM.Survey([src])
prb = EM.TDEM.Problem3D_b(mesh, sigmaMap=mapping)
prb.Solver = Solver
prb.timeSteps = [(1e-06, 40), (5e-06, 40), (1e-05, 40), (5e-05, 40),
(0.0001, 40), (0.0005, 40)]
sigma = np.ones(mesh.nCz)*1e-8
sigma[active] = sig_half
sigma = np.log(sigma[active])
prb.pair(survey)
if srctype == "MagDipole":
bz_ana = mu_0*EM.Analytics.hzAnalyticDipoleT(rx.locs[0][0]+1e-3,
rx.times, sig_half)
elif srctype == "CircularLoop":
bz_ana = mu_0*EM.Analytics.hzAnalyticDipoleT(13, rx.times, sig_half)
bz_calc = survey.dpred(sigma)
ind = np.logical_and(rx.times > bounds[0], rx.times < bounds[1])
log10diff = (np.linalg.norm(np.log10(np.abs(bz_calc[ind])) -
np.log10(np.abs(bz_ana[ind]))) /
np.linalg.norm(np.log10(np.abs(bz_ana[ind]))))
print(' |bz_ana| = {ana} |bz_num| = {num} |bz_ana-bz_num| = {diff}'.format(
ana=np.linalg.norm(bz_ana), num=np.linalg.norm(bz_calc),
diff=np.linalg.norm(bz_ana-bz_calc)))
print('Difference: {}'.format(log10diff))
if plotIt is True:
plt.loglog(rx.times[bz_calc > 0], bz_calc[bz_calc > 0], 'r',
rx.times[bz_calc < 0], -bz_calc[bz_calc < 0], 'r--')
plt.loglog(rx.times, abs(bz_ana), 'b*')
plt.title('sig_half = {0:e}'.format(sig_half))
plt.show()
return log10diff
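# Hedged convenience wrapper (an addition, not part of the original tests):
# runs a single half-space comparison outside unittest with the same arguments
# the CYL tests use; plotIt=True overlays the numeric and analytic decays.
def _example_single_halfspace_run():
    return halfSpaceProblemAnaDiff('CYL', srctype="MagDipole", rxOffset=50.,
                                   sig_half=1e-2, plotIt=True)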
class TDEM_SimpleSrcTests(unittest.TestCase):
def test_source(self):
waveform = EM.TDEM.Src.StepOffWaveform()
assert waveform.eval(0.) == 1.
class TDEM_bTests(unittest.TestCase):
def test_analytic_p2_CYL_50_MagDipolem(self):
self.assertTrue(halfSpaceProblemAnaDiff('CYL', rxOffset=50.,
sig_half=1e+2) < 0.01)
def test_analytic_p1_CYL_50_MagDipolem(self):
self.assertTrue(halfSpaceProblemAnaDiff('CYL', rxOffset=50.,
sig_half=1e+1) < 0.01)
def test_analytic_p0_CYL_50_MagDipolem(self):
self.assertTrue(halfSpaceProblemAnaDiff('CYL', rxOffset=50.,
sig_half=1e+0) < 0.01)
def test_analytic_m1_CYL_50_MagDipolem(self):
self.assertTrue(halfSpaceProblemAnaDiff('CYL', rxOffset=50.,
sig_half=1e-1) < 0.01)
def test_analytic_m2_CYL_50_MagDipolem(self):
self.assertTrue(halfSpaceProblemAnaDiff('CYL', rxOffset=50.,
sig_half=1e-2) < 0.01)
def test_analytic_m3_CYL_50_MagDipolem(self):
self.assertTrue(halfSpaceProblemAnaDiff('CYL', rxOffset=50.,
sig_half=1e-3) < 0.02)
def test_analytic_p0_CYL_1m_MagDipole(self):
self.assertTrue(halfSpaceProblemAnaDiff('CYL', rxOffset=1.0,
sig_half=1e+0) < 0.01)
def test_analytic_m1_CYL_1m_MagDipole(self):
self.assertTrue(halfSpaceProblemAnaDiff('CYL', rxOffset=1.0,
sig_half=1e-1) < 0.01)
def test_analytic_m2_CYL_1m_MagDipole(self):
self.assertTrue(halfSpaceProblemAnaDiff('CYL', rxOffset=1.0,
sig_half=1e-2) < 0.01)
def test_analytic_m3_CYL_1m_MagDipole(self):
self.assertTrue(halfSpaceProblemAnaDiff('CYL', rxOffset=1.0,
sig_half=1e-3) < 0.02)
def test_analytic_p0_CYL_0m_CircularLoop(self):
self.assertTrue(halfSpaceProblemAnaDiff(
'CYL', srctype="CircularLoop", rxOffset=.0, sig_half=1e+0) < 0.15
)
def test_analytic_m1_CYL_0m_CircularLoop(self):
self.assertTrue(halfSpaceProblemAnaDiff(
'CYL', srctype="CircularLoop", rxOffset=.0, sig_half=1e-1) < 0.15
)
def test_analytic_m2_CYL_0m_CircularLoop(self):
self.assertTrue(halfSpaceProblemAnaDiff(
'CYL', srctype="CircularLoop", rxOffset=.0, sig_half=1e-2) < 0.15
)
def test_analytic_m3_CYL_0m_CircularLoop(self):
self.assertTrue(halfSpaceProblemAnaDiff(
'CYL', srctype="CircularLoop", rxOffset=.0, sig_half=1e-3) < 0.15
)
if __name__ == '__main__':
unittest.main()
| mit |
louispotok/pandas | pandas/core/dtypes/cast.py | 2 | 41914 | """ routings for casting """
from datetime import datetime, timedelta
import numpy as np
import warnings
from pandas._libs import tslib, lib
from pandas._libs.tslib import iNaT
from pandas.compat import string_types, text_type, PY3
from .common import (_ensure_object, is_bool, is_integer, is_float,
is_complex, is_datetimetz, is_categorical_dtype,
is_datetimelike,
is_extension_type,
is_extension_array_dtype,
is_object_dtype,
is_datetime64tz_dtype, is_datetime64_dtype,
is_datetime64_ns_dtype,
is_timedelta64_dtype, is_timedelta64_ns_dtype,
is_dtype_equal,
is_float_dtype, is_complex_dtype,
is_integer_dtype,
is_datetime_or_timedelta_dtype,
is_bool_dtype, is_scalar,
is_string_dtype, _string_dtypes,
pandas_dtype,
_ensure_int8, _ensure_int16,
_ensure_int32, _ensure_int64,
_NS_DTYPE, _TD_DTYPE, _INT64_DTYPE,
_POSSIBLY_CAST_DTYPES)
from .dtypes import (ExtensionDtype, PandasExtensionDtype, DatetimeTZDtype,
PeriodDtype)
from .generic import (ABCDatetimeIndex, ABCPeriodIndex,
ABCSeries)
from .missing import isna, notna
from .inference import is_list_like
_int8_max = np.iinfo(np.int8).max
_int16_max = np.iinfo(np.int16).max
_int32_max = np.iinfo(np.int32).max
_int64_max = np.iinfo(np.int64).max
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple)):
values = construct_1d_object_array_from_listlike(list(values))
if getattr(values, 'dtype', None) == np.object_:
if hasattr(values, '_values'):
values = values._values
values = lib.maybe_convert_objects(values)
return values
def is_nested_object(obj):
"""
return a boolean if we have a nested object, e.g. a Series with 1 or
more Series elements
    This may not necessarily be performant.
"""
if isinstance(obj, ABCSeries) and is_object_dtype(obj):
if any(isinstance(v, ABCSeries) for v in obj.values):
return True
return False
def maybe_downcast_to_dtype(result, dtype):
""" try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
if is_scalar(result):
return result
def trans(x):
return x
if isinstance(dtype, string_types):
if dtype == 'infer':
inferred_type = lib.infer_dtype(_ensure_object(result.ravel()))
if inferred_type == 'boolean':
dtype = 'bool'
elif inferred_type == 'integer':
dtype = 'int64'
elif inferred_type == 'datetime64':
dtype = 'datetime64[ns]'
elif inferred_type == 'timedelta64':
dtype = 'timedelta64[ns]'
# try to upcast here
elif inferred_type == 'floating':
dtype = 'int64'
if issubclass(result.dtype.type, np.number):
def trans(x): # noqa
return x.round()
else:
dtype = 'object'
if isinstance(dtype, string_types):
dtype = np.dtype(dtype)
try:
# don't allow upcasts here (except if empty)
if dtype.kind == result.dtype.kind:
if (result.dtype.itemsize <= dtype.itemsize and
np.prod(result.shape)):
return result
if is_bool_dtype(dtype) or is_integer_dtype(dtype):
# if we don't have any elements, just astype it
if not np.prod(result.shape):
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
# if we have any nulls, then we are done
if (isna(arr).any() or
not np.allclose(arr, trans(arr).astype(dtype), rtol=0)):
return result
# a comparable, e.g. a Decimal may slip in here
elif not isinstance(r[0], (np.integer, np.floating, np.bool, int,
float, bool)):
return result
if (issubclass(result.dtype.type, (np.object_, np.number)) and
notna(result).all()):
new_result = trans(result).astype(dtype)
try:
if np.allclose(new_result, result, rtol=0):
return new_result
except Exception:
# comparison of an object dtype with a number type could
# hit here
if (new_result == result).all():
return new_result
elif (issubclass(dtype.type, np.floating) and
not is_bool_dtype(result.dtype)):
return result.astype(dtype)
# a datetimelike
# GH12821, iNaT is casted to float
elif dtype.kind in ['M', 'm'] and result.dtype.kind in ['i', 'f']:
try:
result = result.astype(dtype)
except Exception:
if dtype.tz:
# convert to datetime and change timezone
from pandas import to_datetime
result = to_datetime(result).tz_localize('utc')
result = result.tz_convert(dtype.tz)
except Exception:
pass
return result
def maybe_upcast_putmask(result, mask, other):
"""
A safe version of putmask that potentially upcasts the result
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : ndarray or scalar
The source array or value
Returns
-------
result : ndarray
changed : boolean
Set to true if the result array was upcasted
"""
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if is_datetimelike(result.dtype):
if is_scalar(other):
if isna(other):
other = result.dtype.type('nat')
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
elif is_integer_dtype(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# try to directly set by expanding our array to full
# length of the boolean
try:
om = other[mask]
om_at = om.astype(result.dtype)
if (om == om_at).all():
new_result = result.values.copy()
new_result[mask] = om_at
result[:] = new_result
return result, False
except Exception:
pass
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
# upcast (possibly), otherwise we DON't want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if (is_scalar(other) or
(isinstance(other, np.ndarray) and other.ndim < 1)):
if isna(other):
return changeit()
# we have an ndarray and the masking has nans in it
else:
if isna(other[mask]).any():
return changeit()
try:
np.place(result, mask, other)
except Exception:
return changeit()
return result, False
def maybe_promote(dtype, fill_value=np.nan):
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = iNaT
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
# for now: refuse to upcast datetime64
# (this is because datetime64 will not implicitly upconvert
# to object correctly as of numpy 1.6.1)
if isna(fill_value):
fill_value = iNaT
else:
if issubclass(dtype.type, np.datetime64):
try:
fill_value = tslib.Timestamp(fill_value).value
except Exception:
# the proper thing to do here would probably be to upcast
# to object (but numpy 1.6.1 doesn't do this properly)
fill_value = iNaT
elif issubclass(dtype.type, np.timedelta64):
try:
fill_value = tslib.Timedelta(fill_value).value
except Exception:
# as for datetimes, cannot upcast to object
fill_value = iNaT
else:
fill_value = iNaT
elif is_datetimetz(dtype):
if isna(fill_value):
fill_value = iNaT
elif is_extension_array_dtype(dtype) and isna(fill_value):
fill_value = dtype.na_value
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
dtype = np.float64
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.object_
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
# upcast to prevent overflow
arr = np.asarray(fill_value)
if arr != arr.astype(dtype):
dtype = arr.dtype
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, (np.integer, np.floating)):
dtype = np.complex128
elif fill_value is None:
if is_float_dtype(dtype) or is_complex_dtype(dtype):
fill_value = np.nan
elif is_integer_dtype(dtype):
dtype = np.float64
fill_value = np.nan
elif is_datetime_or_timedelta_dtype(dtype):
fill_value = iNaT
else:
dtype = np.object_
fill_value = np.nan
else:
dtype = np.object_
# in case we have a string that looked like a number
if is_extension_array_dtype(dtype):
pass
elif is_datetimetz(dtype):
pass
elif issubclass(np.dtype(dtype).type, string_types):
dtype = np.object_
return dtype, fill_value
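# Hedged illustration (an addition, not original pandas source):
#   maybe_promote(np.dtype('int64'), np.nan)  -> promotes to np.float64, since
#       integer dtypes cannot hold NaN (the fill value stays nan).
#   maybe_promote(np.dtype('bool'), 3)        -> promotes to np.object_, since
#       bool is never silently mixed with an integer fill value.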
def infer_dtype_from(val, pandas_dtype=False):
"""
interpret the dtype from a scalar or array. This is a convenience
routines to infer dtype from a scalar or an array
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
        If False, a scalar/array belonging to pandas extension types is
        inferred as object
"""
if is_scalar(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
def infer_dtype_from_scalar(val, pandas_dtype=False):
"""
interpret the dtype from a scalar
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
        If False, a scalar belonging to pandas extension types is inferred as
        object
"""
dtype = np.object_
# a 1-element ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to _infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, string_types):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
dtype = np.object_
elif isinstance(val, (np.datetime64, datetime)):
val = tslib.Timestamp(val)
if val is tslib.NaT or val.tz is None:
dtype = np.dtype('M8[ns]')
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit='ns', tz=val.tz)
else:
# return datetimetz as object
return np.object_, val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = tslib.Timedelta(val).value
dtype = np.dtype('m8[ns]')
elif is_bool(val):
dtype = np.bool_
elif is_integer(val):
if isinstance(val, np.integer):
dtype = type(val)
else:
dtype = np.int64
elif is_float(val):
if isinstance(val, np.floating):
dtype = type(val)
else:
dtype = np.float64
elif is_complex(val):
dtype = np.complex_
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
val = val.ordinal
return dtype, val
def infer_dtype_from_array(arr, pandas_dtype=False):
"""
infer the dtype from a scalar or array
Parameters
----------
arr : scalar or array
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
        If False, an array belonging to pandas extension types
        is inferred as object
Returns
-------
tuple (numpy-compat/pandas-compat dtype, array)
Notes
-----
    if pandas_dtype=False, these infer to numpy dtypes
    exactly, with the exception that mixed / object dtypes
    are not coerced by stringifying or conversion.
    if pandas_dtype=True, datetime64tz-aware/categorical
    types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(numpy.object_, [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
arr = [arr]
if pandas_dtype and is_extension_type(arr):
return arr.dtype, arr
elif isinstance(arr, ABCSeries):
return arr.dtype, np.asarray(arr)
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr)
if inferred in ['string', 'bytes', 'unicode',
'mixed', 'mixed-integer']:
return (np.object_, arr)
arr = np.asarray(arr)
return arr.dtype, arr
def maybe_infer_dtype_type(element):
"""Try to infer an object's dtype, for use in arithmetic ops
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
numpy.int64
"""
tipo = None
if hasattr(element, 'dtype'):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
""" provide explicit type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required
"""
if is_extension_type(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
def maybe_cast_item(obj, item, dtype):
chunk = obj[item]
if chunk.values.dtype != dtype:
if dtype in (np.object_, np.bool_):
obj[item] = chunk.astype(np.object_)
elif not issubclass(dtype, (np.integer, np.bool_)): # pragma: no cover
raise ValueError("Unexpected dtype encountered: {dtype}"
.format(dtype=dtype))
def invalidate_string_dtypes(dtype_set):
"""Change string like dtypes to object for
``DataFrame.select_dtypes()``.
"""
non_string_dtypes = dtype_set - _string_dtypes
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def maybe_convert_string_to_object(values):
"""
    Convert a string-like scalar or string-like array to object dtype.
    This avoids numpy handling the array as str dtype.
"""
if isinstance(values, string_types):
values = np.array([values], dtype=object)
elif (isinstance(values, np.ndarray) and
issubclass(values.dtype.type, (np.string_, np.unicode_))):
values = values.astype(object)
return values
def maybe_convert_scalar(values):
"""
Convert a python scalar to the appropriate numpy dtype if possible
This avoids numpy directly converting according to platform preferences
"""
if is_scalar(values):
dtype, values = infer_dtype_from_scalar(values)
try:
values = dtype(values)
except TypeError:
pass
return values
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
length = len(categories)
if length < _int8_max:
return _ensure_int8(indexer)
elif length < _int16_max:
return _ensure_int16(indexer)
elif length < _int32_max:
return _ensure_int32(indexer)
return _ensure_int64(indexer)
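# Hedged illustration (an addition, not original pandas source): with only a
# handful of categories the codes fit in int8, so the indexer is downcast.
#
#   >>> coerce_indexer_dtype(np.array([0, 2, 1, 1]), ['a', 'b', 'c']).dtype
#   dtype('int8')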
def coerce_to_dtypes(result, dtypes):
"""
given a dtypes and a result set, coerce the result elements to the
dtypes
"""
if len(result) != len(dtypes):
raise AssertionError("_coerce_to_dtypes requires equal len arrays")
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type
def conv(r, dtype):
try:
if isna(r):
pass
elif dtype == _NS_DTYPE:
r = tslib.Timestamp(r)
elif dtype == _TD_DTYPE:
r = _coerce_scalar_to_timedelta_type(r)
elif dtype == np.bool_:
# messy. non 0/1 integers do not get converted.
if is_integer(r) and r not in [0, 1]:
return int(r)
r = bool(r)
elif dtype.kind == 'f':
r = float(r)
elif dtype.kind == 'i':
r = int(r)
except Exception:
pass
return r
return [conv(r, dtype) for r, dtype in zip(result, dtypes)]
def astype_nansafe(arr, dtype, copy=True):
""" return a view if copy is False, but
need to be very careful as the result shape could change! """
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, text_type):
# in Py3 that's str, in Py2 that's unicode
return lib.astype_unicode(arr.ravel()).reshape(arr.shape)
elif issubclass(dtype.type, string_types):
return lib.astype_str(arr.ravel()).reshape(arr.shape)
elif is_datetime64_dtype(arr):
if is_object_dtype(dtype):
return tslib.ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == 'M':
return arr.astype(dtype)
raise TypeError("cannot astype a datetimelike from [{from_dtype}] "
"to [{to_dtype}]".format(from_dtype=arr.dtype,
to_dtype=dtype))
elif is_timedelta64_dtype(arr):
if is_object_dtype(dtype):
return tslib.ints_to_pytimedelta(arr.view(np.int64))
elif dtype == np.int64:
return arr.view(dtype)
# in py3, timedelta64[ns] are int64
if ((PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or
(not PY3 and dtype != _TD_DTYPE)):
# allow frequency conversions
# we return a float here!
if dtype.kind == 'm':
mask = isna(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
elif dtype == _TD_DTYPE:
return arr.astype(_TD_DTYPE, copy=copy)
raise TypeError("cannot astype a timedelta from [{from_dtype}] "
"to [{to_dtype}]".format(from_dtype=arr.dtype,
to_dtype=dtype))
elif (np.issubdtype(arr.dtype, np.floating) and
np.issubdtype(dtype, np.integer)):
if not np.isfinite(arr).all():
raise ValueError('Cannot convert non-finite values (NA or inf) to '
'integer')
elif is_object_dtype(arr):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr).values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = ("Passing in '{dtype}' dtype with no frequency is "
"deprecated and will raise in a future version. "
"Please pass in '{dtype}[ns]' instead.")
warnings.warn(msg.format(dtype=dtype.name),
FutureWarning, stacklevel=5)
dtype = np.dtype(dtype.name + "[ns]")
if copy:
return arr.astype(dtype, copy=True)
return arr.view(dtype)
def maybe_convert_objects(values, convert_dates=True, convert_numeric=True,
convert_timedeltas=True, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
# if we have passed in a list or scalar
if isinstance(values, (list, tuple)):
values = np.array(values, dtype=np.object_)
if not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
# convert dates
if convert_dates and values.dtype == np.object_:
# we take an aggressive stance and convert to datetime64[ns]
if convert_dates == 'coerce':
new_values = maybe_cast_to_datetime(
values, 'M8[ns]', errors='coerce')
# if we are all nans then leave me alone
if not isna(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(values,
convert_datetime=convert_dates)
# convert timedeltas
if convert_timedeltas and values.dtype == np.object_:
if convert_timedeltas == 'coerce':
from pandas.core.tools.timedeltas import to_timedelta
new_values = to_timedelta(values, errors='coerce')
# if we are all nans then leave me alone
if not isna(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(
values, convert_timedelta=convert_timedeltas)
# convert to numeric
if values.dtype == np.object_:
if convert_numeric:
try:
new_values = lib.maybe_convert_numeric(values, set(),
coerce_numeric=True)
# if we are all nans then leave me alone
if not isna(new_values).all():
values = new_values
except Exception:
pass
else:
# soft-conversion
values = lib.maybe_convert_objects(values)
values = values.copy() if copy else values
return values
def soft_convert_objects(values, datetime=True, numeric=True, timedelta=True,
coerce=False, copy=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError('At least one of datetime, numeric or timedelta must '
'be True.')
elif conversion_count > 1 and coerce:
raise ValueError("Only one of 'datetime', 'numeric' or "
"'timedelta' can be True when when coerce=True.")
if isinstance(values, (list, tuple)):
# List or scalar
values = np.array(values, dtype=np.object_)
elif not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
elif not is_object_dtype(values.dtype):
# If not object, do not attempt conversion
values = values.copy() if copy else values
return values
# If 1 flag is coerce, ensure 2 others are False
if coerce:
# Immediate return if coerce
if datetime:
from pandas import to_datetime
return to_datetime(values, errors='coerce', box=False)
elif timedelta:
from pandas import to_timedelta
return to_timedelta(values, errors='coerce', box=False)
elif numeric:
from pandas import to_numeric
return to_numeric(values, errors='coerce')
# Soft conversions
if datetime:
values = lib.maybe_convert_objects(values, convert_datetime=datetime)
if timedelta and is_object_dtype(values.dtype):
# Object check to ensure only run if previous did not convert
values = lib.maybe_convert_objects(values, convert_timedelta=timedelta)
if numeric and is_object_dtype(values.dtype):
try:
converted = lib.maybe_convert_numeric(values, set(),
coerce_numeric=True)
            # If all NaNs, then do not alter
values = converted if not isna(converted).all() else values
values = values.copy() if copy else values
except Exception:
pass
return values
def maybe_castable(arr):
# return False to force a non-fastpath
# check datetime64[ns]/timedelta64[ns] are valid
# otherwise try to coerce
kind = arr.dtype.kind
if kind == 'M':
return is_datetime64_ns_dtype(arr.dtype)
elif kind == 'm':
return is_timedelta64_ns_dtype(arr.dtype)
return arr.dtype.name not in _POSSIBLY_CAST_DTYPES
def maybe_infer_to_datetimelike(value, convert_dates=False):
"""
    we might have an array (or single object) that is datetime-like,
    and no dtype is passed; don't change the value unless we find a
    datetime/timedelta set.
    this is pretty strict in that a datetime/timedelta is REQUIRED
    in addition to possible nulls/string-likes
Parameters
----------
value : np.array / Series / Index / list-like
convert_dates : boolean, default False
        if True try really hard to convert dates (such as datetime.date),
        otherwise leave the inferred dtype 'date' alone
"""
if isinstance(value, (ABCDatetimeIndex, ABCPeriodIndex)):
return value
elif isinstance(value, ABCSeries):
if isinstance(value._values, ABCDatetimeIndex):
return value._values
v = value
if not is_list_like(v):
v = [v]
v = np.array(v, copy=False)
# we only care about object dtypes
if not is_object_dtype(v):
return value
shape = v.shape
if not v.ndim == 1:
v = v.ravel()
if not len(v):
return value
def try_datetime(v):
# safe coerce to datetime64
try:
# GH19671
v = tslib.array_to_datetime(v,
require_iso8601=True,
errors='raise')
except ValueError:
# we might have a sequence of the same-datetimes with tz's
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype, xref GH19671
try:
from pandas._libs.tslibs import conversion
from pandas import DatetimeIndex
values, tz = conversion.datetime_to_datetime64(v)
return DatetimeIndex(values).tz_localize(
'UTC').tz_convert(tz=tz)
except (ValueError, TypeError):
pass
except Exception:
pass
return v.reshape(shape)
def try_timedelta(v):
# safe coerce to timedelta64
# will try first with a string & object conversion
from pandas import to_timedelta
try:
return to_timedelta(v)._ndarray_values.reshape(shape)
except Exception:
return v.reshape(shape)
inferred_type = lib.infer_datetimelike_array(_ensure_object(v))
if inferred_type == 'date' and convert_dates:
value = try_datetime(v)
elif inferred_type == 'datetime':
value = try_datetime(v)
elif inferred_type == 'timedelta':
value = try_timedelta(v)
elif inferred_type == 'nat':
# if all NaT, return as datetime
if isna(v).all():
value = try_datetime(v)
else:
# We have at least a NaT and a string
# try timedelta first to avoid spurious datetime conversions
# e.g. '00:00:01' is a timedelta but
# technically is also a datetime
value = try_timedelta(v)
if lib.infer_dtype(value) in ['mixed']:
value = try_datetime(v)
return value
def maybe_cast_to_datetime(value, dtype, errors='raise'):
""" try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
from pandas.core.tools.timedeltas import to_timedelta
from pandas.core.tools.datetimes import to_datetime
if dtype is not None:
if isinstance(dtype, string_types):
dtype = np.dtype(dtype)
is_datetime64 = is_datetime64_dtype(dtype)
is_datetime64tz = is_datetime64tz_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
if is_datetime64 or is_datetime64tz or is_timedelta64:
# force the dtype if needed
msg = ("Passing in '{dtype}' dtype with no frequency is "
"deprecated and will raise in a future version. "
"Please pass in '{dtype}[ns]' instead.")
if is_datetime64 and not is_dtype_equal(dtype, _NS_DTYPE):
if dtype.name in ('datetime64', 'datetime64[ns]'):
if dtype.name == 'datetime64':
warnings.warn(msg.format(dtype=dtype.name),
FutureWarning, stacklevel=5)
dtype = _NS_DTYPE
else:
raise TypeError("cannot convert datetimelike to "
"dtype [{dtype}]".format(dtype=dtype))
elif is_datetime64tz:
# our NaT doesn't support tz's
# this will coerce to DatetimeIndex with
# a matching dtype below
if is_scalar(value) and isna(value):
value = [value]
elif is_timedelta64 and not is_dtype_equal(dtype, _TD_DTYPE):
if dtype.name in ('timedelta64', 'timedelta64[ns]'):
if dtype.name == 'timedelta64':
warnings.warn(msg.format(dtype=dtype.name),
FutureWarning, stacklevel=5)
dtype = _TD_DTYPE
else:
raise TypeError("cannot convert timedeltalike to "
"dtype [{dtype}]".format(dtype=dtype))
if is_scalar(value):
if value == iNaT or isna(value):
value = iNaT
else:
value = np.array(value, copy=False)
# have a scalar array-like (e.g. NaT)
if value.ndim == 0:
value = iNaT
# we have an array of datetime or timedeltas & nulls
elif np.prod(value.shape) or not is_dtype_equal(value.dtype,
dtype):
try:
if is_datetime64:
value = to_datetime(value, errors=errors)._values
elif is_datetime64tz:
# The string check can be removed once issue #13712
# is solved. String data that is passed with a
# datetime64tz is assumed to be naive which should
# be localized to the timezone.
is_dt_string = is_string_dtype(value)
value = to_datetime(value, errors=errors)
if is_dt_string:
# Strings here are naive, so directly localize
value = value.tz_localize(dtype.tz)
else:
# Numeric values are UTC at this point,
# so localize and convert
value = (value.tz_localize('UTC')
.tz_convert(dtype.tz))
elif is_timedelta64:
value = to_timedelta(value, errors=errors)._values
except (AttributeError, ValueError, TypeError):
pass
# coerce datetimelike to object
elif is_datetime64_dtype(value) and not is_datetime64_dtype(dtype):
if is_object_dtype(dtype):
if value.dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
ints = np.asarray(value).view('i8')
return tslib.ints_to_pydatetime(ints)
# we have a non-castable dtype that was passed
raise TypeError('Cannot cast datetime64 to {dtype}'
.format(dtype=dtype))
else:
is_array = isinstance(value, np.ndarray)
# catch a datetime/timedelta that is not of ns variety
# and no coercion specified
if is_array and value.dtype.kind in ['M', 'm']:
dtype = value.dtype
if dtype.kind == 'M' and dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
elif dtype.kind == 'm' and dtype != _TD_DTYPE:
value = to_timedelta(value)
# only do this if we have an array and the dtype of the array is not
# setup already we are not an integer/object, so don't bother with this
# conversion
elif not (is_array and not (issubclass(value.dtype.type, np.integer) or
value.dtype == np.object_)):
value = maybe_infer_to_datetimelike(value)
return value
def find_common_type(types):
"""
Find a common data type among the given dtypes.
Parameters
----------
types : list of dtypes
Returns
-------
pandas extension or numpy dtype
See Also
--------
numpy.find_common_type
"""
if len(types) == 0:
raise ValueError('no types given')
first = types[0]
# workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
# => object
if all(is_dtype_equal(first, t) for t in types[1:]):
return first
if any(isinstance(t, (PandasExtensionDtype, ExtensionDtype))
for t in types):
return np.object
# take lowest unit
if all(is_datetime64_dtype(t) for t in types):
return np.dtype('datetime64[ns]')
if all(is_timedelta64_dtype(t) for t in types):
return np.dtype('timedelta64[ns]')
# don't mix bool / int or float or complex
# this is different from numpy, which casts bool with float/int as int
has_bools = any(is_bool_dtype(t) for t in types)
if has_bools:
has_ints = any(is_integer_dtype(t) for t in types)
has_floats = any(is_float_dtype(t) for t in types)
has_complex = any(is_complex_dtype(t) for t in types)
if has_ints or has_floats or has_complex:
return np.object
return np.find_common_type(types, [])
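# Hedged illustration (an addition, not original pandas source):
#   find_common_type([np.dtype('int64'), np.dtype('float32')]) -> dtype('float64')
#   find_common_type([np.dtype('bool'), np.dtype('int64')])    -> object, because
#       bool is never silently mixed with numeric dtypes here (unlike plain numpy).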
def cast_scalar_to_array(shape, value, dtype=None):
"""
create np.ndarray of specified shape and dtype, filled with values
Parameters
----------
shape : tuple
value : scalar value
dtype : np.dtype, optional
dtype to coerce
Returns
-------
ndarray of shape, filled with value, of specified / inferred dtype
"""
if dtype is None:
dtype, fill_value = infer_dtype_from_scalar(value)
else:
fill_value = value
values = np.empty(shape, dtype=dtype)
values.fill(fill_value)
return values
def construct_1d_arraylike_from_scalar(value, length, dtype):
"""
create a np.ndarray / pandas type of specified shape and dtype
filled with values
Parameters
----------
value : scalar value
length : int
dtype : pandas_dtype / np.dtype
Returns
-------
np.ndarray / pandas type of length, filled with value
"""
if is_datetimetz(dtype):
from pandas import DatetimeIndex
subarr = DatetimeIndex([value] * length, dtype=dtype)
elif is_categorical_dtype(dtype):
from pandas import Categorical
subarr = Categorical([value] * length, dtype=dtype)
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
# coerce if we have nan for an integer dtype
if is_integer_dtype(dtype) and isna(value):
dtype = np.float64
subarr = np.empty(length, dtype=dtype)
subarr.fill(value)
return subarr
def construct_1d_object_array_from_listlike(values):
"""
Transform any list-like object in a 1-dimensional numpy array of object
dtype.
Parameters
----------
values : any iterable which has a len()
Raises
------
TypeError
* If `values` does not have a len()
Returns
-------
1-dimensional numpy array of dtype object
"""
# numpy will try to interpret nested lists as further dimensions, hence
# making a 1D array that contains list-likes is a bit tricky:
result = np.empty(len(values), dtype='object')
result[:] = values
return result
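# Hedged illustration (an addition, not original pandas source):
#   construct_1d_object_array_from_listlike([[1, 2], [3]]) returns a length-2
#   object ndarray whose elements are the lists [1, 2] and [3], instead of the
#   ragged / multi-dimensional interpretation numpy would otherwise attempt.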
def construct_1d_ndarray_preserving_na(values, dtype=None, copy=False):
"""
Construct a new ndarray, coercing `values` to `dtype`, preserving NA.
Parameters
----------
values : Sequence
dtype : numpy.dtype, optional
copy : bool, default False
Note that copies may still be made with ``copy=False`` if casting
is required.
Returns
-------
arr : ndarray[dtype]
Examples
--------
>>> np.array([1.0, 2.0, None], dtype='str')
array(['1.0', '2.0', 'None'], dtype='<U4')
    >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str')
    array(['1.0', '2.0', None], dtype=object)
"""
subarr = np.array(values, dtype=dtype, copy=copy)
if dtype is not None and dtype.kind in ("U", "S"):
# GH-21083
# We can't just return np.array(subarr, dtype='str') since
# NumPy will convert the non-string objects into strings
        # including NA values. So we have to go
# string -> object -> update NA, which requires an
# additional pass over the data.
na_values = isna(values)
subarr2 = subarr.astype(object)
subarr2[na_values] = np.asarray(values, dtype=object)[na_values]
subarr = subarr2
return subarr
| bsd-3-clause |
PAIR-code/lit | lit_nlp/api/model.py | 1 | 6746 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Base classes for LIT models."""
import abc
import inspect
from typing import List, Tuple, Iterable, Iterator, Text
import attr
from lit_nlp.api import types
import numpy as np
JsonDict = types.JsonDict
Spec = types.Spec
def maybe_copy(arr):
"""Decide if we should make a copy of an array in order to release memory.
NumPy arrays may be views into other array objects, by which a small array can
maintain a persistent pointer to a large block of memory that prevents it from
being garbage collected. This can quickly lead to memory issues as such
blocks accumulate during inference.
Args:
arr: a NumPy array
Returns:
arr, or a copy of arr
"""
if not isinstance(arr, np.ndarray):
return arr
# If this is not a view of another array.
if arr.base is None:
return arr
# Heuristic to check if we should 'detach' this array from the parent blob.
# We want to know if this array is a view that might leak memory.
# The simplest check is if arr.base is larger than arr, but we don't want to
# make unnecessary copies when this is just due to slicing along a batch,
# because the other rows are likely still in use.
# TODO(lit-dev): keep an eye on this, if we continue to have memory issues
# we can make copies more aggressively.
if arr.base.ndim > 1 and np.prod(arr.base.shape[1:]) > np.prod(arr.shape):
return np.copy(arr)
# If only a batch slice, reshape, or otherwise.
return arr
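# Hedged illustration (an addition to the original comments): a thin slice of a
# large activation matrix keeps the whole matrix alive through `.base`, so the
# heuristic above detaches it, while a standalone array passes through untouched.
#
#   acts = np.zeros((1000, 512), dtype=np.float32)
#   maybe_copy(acts) is acts              # True: not a view, nothing to do
#   maybe_copy(acts[0, :8]).base is None  # True: small view of a big blob -> copied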
def scrub_numpy_refs(output: JsonDict) -> JsonDict:
"""Scrub problematic pointers. See maybe_copy() and Model.predict()."""
return {k: maybe_copy(v) for k, v in output.items()}
@attr.s(auto_attribs=True, frozen=True)
class ModelSpec(object):
"""Model spec."""
input: Spec
output: Spec
def is_compatible_with_dataset(self, dataset_spec: Spec) -> bool:
"""Return true if this model is compatible with the dataset spec."""
for key, field_spec in self.input.items():
if key in dataset_spec:
# If the field is in the dataset, make sure it's compatible.
if not dataset_spec[key].is_compatible(field_spec):
return False
else:
# If the field isn't in the dataset, only allow if the model marks as
# optional.
if field_spec.required:
return False
return True
class Model(metaclass=abc.ABCMeta):
"""Base class for LIT models."""
def description(self) -> str:
"""Return a human-readable description of this component.
Defaults to class docstring, but subclass may override this to be
instance-dependent - for example, including the path from which the model
was loaded.
Returns:
(string) A human-readable description for display in the UI.
"""
return inspect.getdoc(self) or ''
def max_minibatch_size(self) -> int:
"""Maximum minibatch size for this model."""
return 1
@abc.abstractmethod
def predict_minibatch(self, inputs: List[JsonDict]) -> List[JsonDict]:
"""Run prediction on a batch of inputs.
Args:
inputs: sequence of inputs, following model.input_spec()
Returns:
list of outputs, following model.output_spec()
"""
return
@abc.abstractmethod
def input_spec(self) -> types.Spec:
"""Return a spec describing model inputs."""
return
@abc.abstractmethod
def output_spec(self) -> types.Spec:
"""Return a spec describing model outputs."""
return
def spec(self) -> ModelSpec:
return ModelSpec(input=self.input_spec(), output=self.output_spec())
def get_embedding_table(self) -> Tuple[List[Text], np.ndarray]:
"""Return the full vocabulary and embedding table.
Implementing this is optional, but needed for some techniques such as
HotFlip which use the embedding table to search over candidate words.
Returns:
(<string>[vocab_size], <float32>[vocab_size, emb_dim])
"""
raise NotImplementedError('get_embedding_table() not implemented for ' +
self.__class__.__name__)
def fit_transform_with_metadata(self, indexed_inputs: List[JsonDict]):
"""For internal use by UMAP and other sklearn-based models."""
raise NotImplementedError(
'fit_transform_with_metadata() not implemented for ' +
self.__class__.__name__)
##
# Concrete implementations of common functions.
def predict(self,
inputs: Iterable[JsonDict],
scrub_arrays=True,
**kw) -> Iterator[JsonDict]:
"""Run prediction on a dataset.
This uses minibatch inference for efficiency, but yields per-example output.
Args:
inputs: iterable of input dicts
scrub_arrays: if True, will copy some returned NumPy arrays in order to
allow garbage collection of intermediate data. Strongly recommended if
results will not be immediately consumed and discarded, as otherwise the
common practice of slicing arrays returned by e.g. TensorFlow can result
in large memory leaks.
**kw: additional kwargs passed to predict_minibatch()
Returns:
model outputs, for each input
"""
results = self._batched_predict(inputs, **kw)
if scrub_arrays:
results = (scrub_numpy_refs(res) for res in results)
return results
def _batched_predict(self, inputs: Iterable[JsonDict],
**kw) -> Iterator[JsonDict]:
"""Internal helper to predict using minibatches."""
minibatch_size = self.max_minibatch_size(**kw)
minibatch = []
for ex in inputs:
if len(minibatch) < minibatch_size:
minibatch.append(ex)
if len(minibatch) >= minibatch_size:
yield from self.predict_minibatch(minibatch, **kw)
minibatch = []
if len(minibatch) > 0: # pylint: disable=g-explicit-length-test
yield from self.predict_minibatch(minibatch, **kw)
def predict_with_metadata(self, indexed_inputs: Iterable[JsonDict],
**kw) -> Iterator[JsonDict]:
"""As predict(), but inputs are IndexedInput."""
return self.predict((ex['data'] for ex in indexed_inputs), **kw)
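# Hedged sketch (an addition, not part of the LIT API surface): a minimal
# concrete Model implementing the three abstract methods. The spec field types
# used here (types.TextSegment, types.RegressionScore) are assumed to exist in
# lit_nlp.api.types; substitute whichever LitType fields your model produces.
class _ToyLengthModel(Model):
  """Toy model that 'predicts' the character length of the input text."""

  def input_spec(self) -> types.Spec:
    return {'text': types.TextSegment()}

  def output_spec(self) -> types.Spec:
    return {'length': types.RegressionScore()}

  def predict_minibatch(self, inputs: List[JsonDict]) -> List[JsonDict]:
    # One output dict per input dict, following output_spec().
    return [{'length': float(len(ex['text']))} for ex in inputs]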
| apache-2.0 |
apdjustino/DRCOG_Urbansim | synthicity/urbansim/transitionmodel.py | 1 | 4937 | import pandas as pd, numpy as np
def estimate(dset,config,year=None,show=True,variables=None):
return
def simulate(dset,config,year=None,show=True,variables=None,subtract=False):
assert "table" in config
assert "writetotmp" in config
assert "geography_field" in config
assert "control_totals" in config or "vacancy_targets" in config
assert "amount_field" in config or "vacancy_targets" in config
assert "first_year" in config
year_step = 1 if "year_step" not in config else config["year_step"]
first_year = config["first_year"]
curyear = year
prevyear = curyear - year_step
hhs = eval(config["table"])
outtblname = config["writetotmp"]
if curyear == first_year:
hhs["_year_added_"] = np.array([curyear]*len(hhs.index))
dset.save_tmptbl(outtblname,hhs)
return
# print hhs
#if show: print hhs.describe()
if "control_totals" in config:
control_totals = eval(config["control_totals"])
cur_ct = control_totals.ix[curyear]
prev_ct = control_totals.ix[prevyear]
if "vacancy_targets" in config:
va_cfg = config["vacancy_targets"]
assert "targets" in va_cfg and "supply" in va_cfg and "demands" in va_cfg
demands = va_cfg["demands"]
num = eval(demands[0])
for item in demands[1:]: num = num.add(eval(item),fill_value=0)
denom = eval(va_cfg["supply"])
# print "Numerator:\n", num
# print "Denominator:\n", denom
vacancy = (denom-num)/denom
if "negative_vacancy" in config and config["negative_vacancy"] == False: vacancy[vacancy<0] = 0
# print "Vacancy = (denom-num)/denom:\n", vacancy
targets = eval(va_cfg["targets"])
target_vacancy = targets[year]
# print "Minimum vacancy (from target_vacancy table):\n", target_vacancy
vacancy_diff = (target_vacancy-vacancy).dropna()
# print "Vacancy diff = target-actual:\n", vacancy_diff
newunits = (vacancy_diff[vacancy_diff>0]*denom).dropna()
print "New units to build (vacancy_diff * denom):\n", newunits
control_totals = cur_ct = newunits.reset_index()
prev_ct = None
config["amount_field"] = 0
cols = []
for col in control_totals.columns:
if col <> config["amount_field"]: cols.append(col)
if type(cur_ct) == pd.DataFrame:
if prev_ct is not None:
#cnt = cur_ct.reset_index(drop=True).set_index(cols) - prev_ct.reset_index(drop=True).set_index(cols)
cnt = cur_ct.reset_index(drop=True).set_index(cols)
else:
cnt = cur_ct.reset_index(drop=True).set_index(cols)
else:
cnt = cur_ct - prev_ct
print cnt
print "Adding agents to match target of %d " % cnt.sum()
newhh = []
if type(cur_ct) == pd.DataFrame:
for row in cnt.iterrows():
index,row = row
subset = hhs
if type(index) in [np.int32,np.int64]: index = [index]
for col,name in zip(index,cols): subset = subset[subset[name] == col]
# print 'subset size'
subset_size = len(subset.index.values)
if "size_field" in config: subset_size = subset[config["size_field"]].sum()
# print subset_size
num = row.values[0]
# print 'target'
# print num
if "vacancy_targets" not in config:
num = num - subset_size
if subset_size == 0:
continue
print 'action'
print num
if subtract:
if num == 0: continue
if num < 0:
hhs = hhs[np.invert(np.in1d(hhs.index.values,np.random.choice(subset.index.values,abs(num),replace=False)))]
else:
tmphh = hhs.ix[np.random.choice(subset.index.values,num)]
if "size_field" in config: tmphh = tmphh[np.cumsum(tmphh[config["size_field"]].values)<num]
newhh.append(tmphh)
else:
if num == 0: continue
if num < 0: #continue
tmphh = hhs.ix[np.random.choice(subset.index.values,abs(num))]
if "size_field" in config: tmphh = tmphh[np.cumsum(tmphh[config["size_field"]].values)<abs(num)]
hhs = hhs[np.invert(np.in1d(hhs.index.values,tmphh.index.values))]
else:
tmphh = hhs.ix[np.random.choice(subset.index.values,num)]
if "size_field" in config: tmphh = tmphh[np.cumsum(tmphh[config["size_field"]].values)<num]
newhh.append(tmphh)
else:
num = cnt.values[0]
if num <> 0: newhh.append(hhs.ix[np.random.choice(hhs.index.values,num,replace=False)])
if not newhh:
print 'None!!!'
return # no new agents
newhh = pd.concat(newhh)
newhh[config["geography_field"]] = -1
newhh["_year_added_"] = np.array([curyear]*len(newhh.index))
if hhs.index.values.dtype not in [np.int32,np.int64]: raise Exception("Only unique integer labels are allowed")
newhh = newhh.set_index(np.arange(len(newhh.index))+np.amax(hhs.index.values)+1)
hhs = pd.concat([hhs,newhh])
dset.save_tmptbl(outtblname,hhs)
dset.save_table(outtblname)
print "Histogram of agents by year:\n", hhs._year_added_.value_counts()
| agpl-3.0 |
bayesimpact/bob-emploi | data_analysis/misc/job_groups_skills.py | 1 | 4577 | """A script to gather basic skills for each job group sorted by priority.
This script takes as input the ROME "liens_rome_referentiels" table
and the skills associated with job offers provided by the cleaned_data lib.
It outputs a JSON file per job group with a list of skills.
"""
import os
from os import path
from typing import Optional
import pandas as pd
from bob_emploi.data_analysis.lib import cleaned_data
_ROME_VERSION = 'v346'
_BASIC_SKILL_RUBRIQUE_CODE = 6
_BASIC_ACTIVITY_RUBRIQUE_CODE = 7
def _merge_hard_skills(
skills: pd.DataFrame, activities: pd.DataFrame,
rome_crosswalks: pd.DataFrame) -> pd.DataFrame:
"""Make a hard skill dataframe."""
skills.set_index('code_ogr', inplace=True)
activities.set_index('code_ogr', inplace=True)
skills_rome = rome_crosswalks.join(skills, how='inner', on='code_ogr', )
activities_rome = rome_crosswalks.join(activities, how='inner', on='code_ogr')
return skills_rome[[
'code_rome', 'code_ogr', 'libelle_competence', 'code_ref_rubrique']] \
.append(
activities_rome[['code_rome', 'code_ogr', 'libelle_activite', 'code_ref_rubrique']]
.rename(columns={'libelle_activite': 'libelle_competence'}))
def _get_skill_freq_in_offers(job_offers_skills: pd.DataFrame) -> pd.DataFrame:
"""Get the frequency of a skill among the job offers for this occupation """
# Remove duplicated skills of each offers.
job_offers_skills.drop_duplicates(['offer_num', 'code_ogr'], inplace=True)
skills_occurences = job_offers_skills.groupby(
'rome_profession_card_code').code_ogr.value_counts()
num_offers_per_rome = job_offers_skills.groupby(
'rome_profession_card_code').offer_num.nunique()
return skills_occurences.div(num_offers_per_rome).rename('frequency')
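# Hedged worked example (illustrative numbers, not from the data): if a job
# group has 4 distinct offers and a given skill appears in 3 of them, its
# frequency is 3 / 4 = 0.75; main() later flags it isPriority because the
# frequency exceeds the .5 threshold.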
def main(
data_folder: str = 'data',
rome_crosswalks_filename: Optional[str] = None,
job_offers_filename: Optional[str] = None,
skills_filename: Optional[str] = None,
activities_filename: Optional[str] = None,
out_dir: str = 'job_group_skills') -> None:
"""Get prioritized skills for each job group."""
if not rome_crosswalks_filename:
rome_crosswalks_filename = path.join(
data_folder, f'rome/csv/unix_liens_rome_referentiels_{_ROME_VERSION}_utf8.csv')
if not job_offers_filename:
job_offers_filename = path.join(data_folder, 'job_offers/recent_job_offers.csv')
if not skills_filename:
skills_filename = path.join(
data_folder, f'rome/csv/unix_referentiel_competence_{_ROME_VERSION}_utf8.csv')
if not activities_filename:
activities_filename = path.join(
data_folder, f'rome/csv/unix_referentiel_activite_{_ROME_VERSION}_utf8.csv')
out_dir_path = path.join(data_folder, out_dir)
if not path.isdir(out_dir_path):
os.mkdir(out_dir_path)
job_offers_skills = cleaned_data.job_offers_skills(data_folder)
rome_crosswalks = pd.read_csv(rome_crosswalks_filename)
skills = pd.read_csv(skills_filename)
activities = pd.read_csv(activities_filename)
# Get the frequency of a skill among the job offers for this occupation.
skills_freq_in_offers = _get_skill_freq_in_offers(job_offers_skills).reset_index().rename(
columns={'rome_profession_card_code': 'code_rome'})
# Keep only the basic and not the specific skills.
hard_skills_rome = _merge_hard_skills(skills, activities, rome_crosswalks)
basic_hard_skills_rome = hard_skills_rome.loc[
(hard_skills_rome.code_ref_rubrique == _BASIC_SKILL_RUBRIQUE_CODE) |
(hard_skills_rome.code_ref_rubrique == _BASIC_ACTIVITY_RUBRIQUE_CODE)]
basic_hard_skills_rome.set_index(['code_ogr', 'code_rome'], inplace=True)
# Add priorities.
prioritized_hard_skills = basic_hard_skills_rome.join(
skills_freq_in_offers.set_index(['code_ogr', 'code_rome']), how='left')\
.fillna(0).reset_index()
prioritized_hard_skills['isPriority'] = prioritized_hard_skills.frequency.gt(.5)
clean_columns = prioritized_hard_skills.rename({
'libelle_competence': 'name',
'code_ogr': 'codeOgr',
}, axis='columns').sort_values(['frequency'], ascending=False).drop([
'code_ref_rubrique',
'frequency',
], axis='columns')
clean_columns.groupby('code_rome').apply(
lambda df: df
.drop('code_rome', axis='columns')
.to_json(path.join(out_dir_path, f'skills_{df.name}.json'), orient='records'))
if __name__ == '__main__':
main()
| gpl-3.0 |
clovett/MissionPlanner | Lib/site-packages/scipy/signal/ltisys.py | 53 | 23848 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
#
from filter_design import tf2zpk, zpk2tf, normalize
import numpy
from numpy import product, zeros, array, dot, transpose, ones, \
nan_to_num, zeros_like, linspace
#import scipy.interpolate as interpolate
import scipy.integrate as integrate
import scipy.linalg as linalg
from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \
squeeze, diag, asarray
def tf2ss(num, den):
"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the numerator and denominator
polynomials.
Returns
-------
A, B, C, D : ndarray
State space representation of the system.
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if (M > K):
raise ValueError("Improper transfer function.")
if (M == 0 or K == 0): # Null system
return array([],float), array([], float), array([], float), \
array([], float)
    # pad numerator to have same number of columns as denominator
num = r_['-1',zeros((num.shape[0],K-M), num.dtype), num]
if num.shape[-1] > 0:
D = num[:,0]
else:
D = array([],float)
if K == 1:
return array([], float), array([], float), array([], float), D
frow = -array([den[1:]])
A = r_[frow, eye(K-2, K-1)]
B = eye(K-1, 1)
C = num[:,1:] - num[:,0] * den[1:]
return A, B, C, D
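# Illustrative usage sketch (not part of the original scipy source): convert
# H(s) = (s + 3) / (s**2 + 3*s + 2) to controller-canonical state-space form.
def _example_tf2ss():
    A, B, C, D = tf2ss([1.0, 3.0], [1.0, 3.0, 2.0])
    # Expected (up to dtype): A = [[-3, -2], [1, 0]], B = [[1], [0]],
    # C = [[1, 3]], D = [0].
    return A, B, C, D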
def _none_to_empty(arg):
if arg is None:
return []
else:
return arg
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are rank-2.
"""
A, B, C, D = map(_none_to_empty, (A, B, C, D))
A, B, C, D = map(atleast_2d, (A, B, C, D))
if ((len(A.shape) > 2) or (len(B.shape) > 2) or \
(len(C.shape) > 2) or (len(D.shape) > 2)):
raise ValueError("A, B, C, D arrays can be no larger than rank-2.")
MA, NA = A.shape
MB, NB = B.shape
MC, NC = C.shape
MD, ND = D.shape
if (MC == 0) and (NC == 0) and (MD != 0) and (NA != 0):
MC, NC = MD, NA
C = zeros((MC, NC))
if (MB == 0) and (NB == 0) and (MA != 0) and (ND != 0):
MB, NB = MA, ND
        B = zeros((MB, NB))
if (MD == 0) and (ND == 0) and (MC != 0) and (NB != 0):
MD, ND = MC, NB
        D = zeros((MD, ND))
if (MA == 0) and (NA == 0) and (MB != 0) and (NC != 0):
MA, NA = MB, NC
        A = zeros((MA, NA))
if MA != NA:
raise ValueError("A must be square.")
if MA != MB:
raise ValueError("A and B must have the same number of rows.")
if NA != NC:
raise ValueError("A and C must have the same number of columns.")
if MD != MC:
raise ValueError("C and D must have the same number of rows.")
if ND != NB:
raise ValueError("B and D must have the same number of columns.")
return A, B, C, D
def ss2tf(A, B, C, D, input=0):
"""State-space to transfer function.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
num, den : 1D ndarray
Numerator and denominator polynomials (as sequences)
respectively.
"""
# transfer function is C (sI - A)**(-1) B + D
A, B, C, D = map(asarray, (A, B, C, D))
# Check consistency and
# make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make MOSI from possibly MOMI system.
if B.shape[-1] != 0:
B = B[:,input]
B.shape = (B.shape[0],1)
if D.shape[-1] != 0:
D = D[:,input]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape,axis=0) == 0) and (product(C.shape,axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape,axis=0) == 0) and (product(A.shape,axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:,0] + B[:,0] + C[0,:] + D
num = numpy.zeros((nout, num_states+1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k,:])
num[k] = poly(A - dot(B,Ck)) + (D[k]-1)*den
return num, den
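# Illustrative sketch (not part of the original scipy source): ss2tf undoes
# tf2ss, so a round trip should recover the same transfer function.
def _example_ss2tf_roundtrip():
    A, B, C, D = tf2ss([1.0, 3.0], [1.0, 3.0, 2.0])
    num, den = ss2tf(A, B, C, D)
    # Expected: num ~ [0, 1, 3] and den ~ [1, 3, 2],
    # i.e. H(s) = (s + 3) / (s**2 + 3*s + 2) again.
    return num, den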
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State-space matrices.
"""
return tf2ss(*zpk2tf(z,p,k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A,B,C,D,input=input))
class lti(object):
"""Linear Time Invariant class which simplifies representation.
"""
def __init__(self,*args,**kwords):
"""Initialize the LTI system using either:
(numerator, denominator)
(zeros, poles, gain)
(A, B, C, D) -- state-space.
"""
N = len(args)
if N == 2: # Numerator denominator transfer function input
self.__dict__['num'], self.__dict__['den'] = normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = tf2zpk(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = tf2ss(*args)
self.inputs = 1
if len(self.num.shape) > 1:
self.outputs = self.num.shape[0]
else:
self.outputs = 1
elif N == 3: # Zero-pole-gain form
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = args
self.__dict__['num'], self.__dict__['den'] = zpk2tf(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = zpk2ss(*args)
self.inputs = 1
if len(self.zeros.shape) > 1:
self.outputs = self.zeros.shape[0]
else:
self.outputs = 1
elif N == 4: # State-space form
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = abcd_normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = ss2zpk(*args)
self.__dict__['num'], self.__dict__['den'] = ss2tf(*args)
self.inputs = self.B.shape[-1]
self.outputs = self.C.shape[0]
else:
raise ValueError("Needs 2, 3, or 4 arguments.")
def __setattr__(self, attr, val):
if attr in ['num','den']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
tf2zpk(self.num, self.den)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
tf2ss(self.num, self.den)
elif attr in ['zeros', 'poles', 'gain']:
self.__dict__[attr] = val
self.__dict__['num'], self.__dict__['den'] = \
zpk2tf(self.zeros,
self.poles, self.gain)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
zpk2ss(self.zeros,
self.poles, self.gain)
elif attr in ['A', 'B', 'C', 'D']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
ss2zpk(self.A, self.B,
self.C, self.D)
self.__dict__['num'], self.__dict__['den'] = \
ss2tf(self.A, self.B,
self.C, self.D)
else:
self.__dict__[attr] = val
def impulse(self, X0=None, T=None, N=None):
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
return lsim(self, U, T, X0=X0)
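# Illustrative sketch (not part of the original scipy source): three equivalent
# ways of constructing the same first-order system H(s) = 1 / (s + 1). The
# zero-pole-gain form is given ndarrays here, because this implementation stores
# the arguments as passed and later reads their ``.shape``.
def _example_lti_construction():
    sys_tf = lti([1.0], [1.0, 1.0])                        # (num, den)
    sys_zpk = lti(array([]), array([-1.0]), 1.0)           # (zeros, poles, gain)
    sys_ss = lti([[-1.0]], [[1.0]], [[1.0]], [[0.0]])      # (A, B, C, D)
    return sys_tf, sys_zpk, sys_ss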
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
odeint. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses :func:`scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for :func:`scipy.integrate.odeint` for the full list of arguments.
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
X0 = zeros(sys.B.shape[0],sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
        # None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1,1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A,x) + squeeze(dot(sys.B,nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C,transpose(xout)) + dot(sys.D,transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A,x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C,transpose(xout))
return T, squeeze(transpose(yout)), xout
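# Illustrative sketch (not part of the original scipy source): step response of
# H(s) = 1 / (s + 1) via lsim2. This relies on the scipy.interpolate import at
# the top of the module being active.
def _example_lsim2():
    t = linspace(0, 5.0, 101)
    u = ones(t.shape)
    return lsim2(([1.0], [1.0, 1.0]), U=u, T=t)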
def lsim(system, U, T, X0=None, interp=1):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input.
T : array_like
The time steps at which the input is defined and at which the
output is desired.
    X0 : array_like, optional
The initial conditions on the state vector (zero by default).
interp : {1, 0}
Whether to use linear (1) or zero-order hold (0) interpolation.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time-evolution of the state-vector.
"""
# system is an lti system or a sequence
# with 2 (num, den)
# 3 (zeros, poles, gain)
# 4 (A, B, C, D)
# describing the system
# U is an input vector at times T
# if system describes multiple inputs
# then U can be a rank-2 array with the number of columns
# being the number of inputs
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
U = atleast_1d(U)
T = atleast_1d(T)
if len(U.shape) == 1:
U = U.reshape((U.shape[0],1))
sU = U.shape
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("System does not define that many inputs.")
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
xout = zeros((len(T),sys.B.shape[0]), sys.A.dtype)
xout[0] = X0
A = sys.A
AT, BT = transpose(sys.A), transpose(sys.B)
dt = T[1]-T[0]
lam, v = linalg.eig(A)
vt = transpose(v)
vti = linalg.inv(vt)
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
ATm1 = linalg.inv(AT)
ATm2 = dot(ATm1,ATm1)
I = eye(A.shape[0],dtype=A.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
for k in xrange(1,len(T)):
dt1 = T[k] - T[k-1]
if dt1 != dt:
dt = dt1
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
xout[k] = dot(xout[k-1],GT) + dot(U[k-1],F1T)
if interp:
xout[k] = xout[k] + dot((U[k]-U[k-1]),F2T)
yout = squeeze(dot(U,transpose(sys.D))) + squeeze(dot(xout,transpose(sys.C)))
return T, squeeze(yout), squeeze(xout)
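# Illustrative sketch (not part of the original scipy source): drive the same
# first-order system 1 / (s + 1) with a ramp input, using zero-order-hold
# interpolation of the input between samples.
def _example_lsim():
    t = linspace(0, 5.0, 51)
    u = t.copy()
    return lsim(([1.0], [1.0, 1.0]), u, t, interp=0)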
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval. This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7*tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : LTI class or tuple
If specified as a tuple, the system is described as
``(num, den)``, ``(zero, pole, gain)``, or ``(A, B, C, D)``.
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
B = sys.B
else:
B = sys.B + X0
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
h = zeros(T.shape, sys.A.dtype)
s,v = linalg.eig(sys.A)
vi = linalg.inv(v)
C = sys.C
for k in range(len(h)):
es = diag(numpy.exp(s*T[k]))
eA = (dot(dot(v,es),vi)).astype(h.dtype)
h[k] = squeeze(dot(dot(C,eA),B))
return T, h
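# Illustrative sketch (not part of the original scipy source): the impulse
# response of H(s) = 1 / (s + 1) decays as exp(-t).
def _example_impulse():
    t, y = impulse(([1.0], [1.0, 1.0]))
    return t, y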
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
.. versionadded:: 0.8.0
Examples
--------
    Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
U = zeros_like(T)
ic = B + X0
Tr, Yr, Xr = lsim2(sys, U, T, ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0)
return vals[0], vals[1]
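# Illustrative sketch (not part of the original scipy source): step response of
# H(s) = 1 / (s**2 + 3*s + 2), which settles at H(0) = 0.5.
def _example_step():
    t, y = step(([1.0], [1.0, 3.0, 2.0]))
    return t, y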
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
**kwargs :
Additional keyword arguments are passed on the function
`scipy.signal.lsim2`, which in turn passes them on to
:func:`scipy.integrate.odeint`. See the documentation for
:func:`scipy.integrate.odeint` for information about these
arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
.. versionadded:: 0.8.0
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
| gpl-3.0 |
yousrabk/mne-python | examples/visualization/plot_ssp_projs_sensitivity_map.py | 18 | 1286 | """
==================================
Sensitivity map of SSP projections
==================================
This example shows the sources that have a forward field
similar to the first SSP vector correcting for ECG.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from mne import read_forward_solution, read_proj, sensitivity_map
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif'
fwd = read_forward_solution(fname, surf_ori=True)
projs = read_proj(ecg_fname)
projs = projs[3:][::2] # take only one projection per channel type
# Compute sensitivity map
ssp_ecg_map = sensitivity_map(fwd, ch_type='grad', projs=projs, mode='angle')
###############################################################################
# Show sensitivity map
plt.hist(ssp_ecg_map.data.ravel())
plt.show()
args = dict(clim=dict(kind='value', lims=(0.2, 0.6, 1.)), smoothing_steps=7,
hemi='rh', subjects_dir=subjects_dir)
ssp_ecg_map.plot(subject='sample', time_label='ECG SSP sensitivity', **args)
| bsd-3-clause |
hsiaoyi0504/scikit-learn | sklearn/ensemble/forest.py | 176 | 62555 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
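# Illustrative sketch (not part of the original scikit-learn source): for any
# seed, every sample index is either drawn into the bootstrap sample or ends up
# in the out-of-bag set, so the two index sets together cover range(n_samples).
def _example_oob_indices(random_state=0, n_samples=10):
    sampled = _generate_sample_indices(random_state, n_samples)
    unsampled = _generate_unsampled_indices(random_state, n_samples)
    assert set(sampled) | set(unsampled) == set(range(n_samples))
    return sampled, unsampled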
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity;
            # [:, np.newaxis] does not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
            valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
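# Illustrative usage sketch (not part of the original scikit-learn source): fit
# a small forest on synthetic data and read the out-of-bag score it exposes.
def _example_random_forest_classifier():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 4)
    y = (X[:, 0] + X[:, 1] > 1.0).astype(int)
    clf = RandomForestClassifier(n_estimators=25, oob_score=True, random_state=0)
    clf.fit(X, y)
    return clf.oob_score_, clf.feature_importances_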
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
    decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
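# Illustrative usage sketch (not part of the original scikit-learn source): the
# regressor follows the same pattern; oob_prediction_ holds per-sample estimates.
def _example_random_forest_regressor():
    rng = np.random.RandomState(0)
    X = rng.rand(200, 3)
    y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=200)
    reg = RandomForestRegressor(n_estimators=25, oob_score=True, random_state=0)
    reg.fit(X, y)
    return reg.oob_score_, reg.oob_prediction_[:5]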
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
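    Examples
    --------
    A minimal usage sketch on synthetic data (``make_classification`` is used
    purely for illustration; only the prediction shape is shown, not exact
    class labels):
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.ensemble import ExtraTreesClassifier
    >>> X, y = make_classification(n_samples=100, n_features=4, random_state=0)
    >>> clf = ExtraTreesClassifier(n_estimators=10, random_state=0)
    >>> clf = clf.fit(X, y)
    >>> clf.predict(X[:2, :]).shape
    (2,)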
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
    and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer
        than ``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
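    Examples
    --------
    A minimal usage sketch on synthetic data (``make_regression`` is used
    purely for illustration; only the prediction shape is shown):
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.ensemble import ExtraTreesRegressor
    >>> X, y = make_regression(n_samples=100, n_features=4, random_state=0)
    >>> reg = ExtraTreesRegressor(n_estimators=10, random_state=0)
    >>> reg = reg.fit(X, y)
    >>> reg.predict(X[:3, :]).shape
    (3,)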
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
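    For example, with the defaults ``n_estimators=10`` and ``max_depth=5``
    (and ``max_leaf_nodes=None``), the coding has at most
    ``10 * 2 ** 5 = 320`` columns.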
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer
        than ``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
        Whether to return a sparse CSR matrix (the default) or a dense array
        compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
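    Examples
    --------
    A minimal usage sketch (random input; only the row count of the sparse
    output is shown, since the number of columns depends on the leaves grown):
    >>> import numpy as np
    >>> from sklearn.ensemble import RandomTreesEmbedding
    >>> X = np.random.RandomState(0).rand(20, 2)
    >>> embedder = RandomTreesEmbedding(n_estimators=5, random_state=0)
    >>> X_transformed = embedder.fit_transform(X)
    >>> X_transformed.shape[0]
    20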
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are actually unit tests checking that
        # we fail for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
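        # Totally random trees: fit the underlying extra-trees regressor
        # against a uniformly random target, so the learned splits do not
        # depend on any real label.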
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
yaowenqiang/mtools | mtools/mplotqueries/plottypes/base_type.py | 7 | 3859 | from mtools.util import OrderedDict
from mtools.util.log2code import Log2CodeConverter
from mtools.util.grouping import Grouping
import re
from datetime import MINYEAR, MAXYEAR, datetime, timedelta
from dateutil.tz import tzutc
import types
try:
from matplotlib import cm
except ImportError:
raise ImportError("Can't import matplotlib. See https://github.com/rueckstiess/mtools/blob/master/INSTALL.md for instructions how to install matplotlib or try mlogvis instead, which is a simplified version of mplotqueries that visualizes the logfile in a web browser.")
class BasePlotType(object):
# 14 most distinguishable colors, according to
# http://stackoverflow.com/questions/309149/generate-distinctly-different-rgb-colors-in-graphs
colors = ['#000000','#00FF00','#0000FF','#FF0000','#01FFFE','#FFA6FE','#FFDB66','#006401', \
'#010067','#95003A','#007DB5','#FF00F6','#FFEEE8','#774D00']
color_index = 0
markers = ['o', 's', '<', 'D']
marker_index = 0
sort_order = 0
plot_type_str = 'base'
default_group_by = None
date_range = (datetime(MAXYEAR, 12, 31, tzinfo=tzutc()), datetime(MINYEAR, 1, 1, tzinfo=tzutc()))
def __init__(self, args=None, unknown_args=None):
self.args = args
self.unknown_args = unknown_args
self.groups = OrderedDict()
self.empty = True
self.limits = None
if self.args['optime_start']:
self.xlabel = 'time (start of ops)'
else:
self.xlabel = 'time (end of ops)'
def accept_line(self, logevent):
""" return True if this PlotType can plot this line. """
return True
def add_line(self, logevent):
""" append log line to this plot type. """
key = None
self.empty = False
self.groups.setdefault(key, list()).append(logevent)
@property
def logevents(self):
""" iterator yielding all logevents from groups dictionary. """
for key in self.groups:
for logevent in self.groups[key]:
yield logevent
@classmethod
def color_map(cls, group):
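        """ return the next (color, marker) combination for a new group.
        Colors advance on every call; once all 14 colors have been used, the
        marker advances and the color index wraps back to the start. """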
color = cls.colors[cls.color_index]
cls.color_index += 1
marker = cls.markers[cls.marker_index]
if cls.color_index >= len(cls.colors):
cls.marker_index += 1
cls.marker_index %= len(cls.markers)
            cls.color_index %= len(cls.colors)
return color, marker
def group(self):
""" (re-)group all logevents by the given group. """
if hasattr(self, 'group_by'):
group_by = self.group_by
else:
group_by = self.default_group_by
if self.args['group'] != None:
group_by = self.args['group']
self.groups = Grouping(self.logevents, group_by)
self.groups.move_items(None, 'others')
self.groups.sort_by_size(group_limit=self.args['group_limit'], discard_others=self.args['no_others'])
def plot_group(self, group, idx, axis):
raise NotImplementedError("BasePlotType can't plot. Use a derived class instead")
def clicked(self, event):
""" this is called if an element of this plottype was clicked. Implement in sub class. """
pass
def plot(self, axis, ith_plot, total_plots, limits):
self.limits = limits
artists = []
print self.plot_type_str.upper(), "plot"
print "%5s %9s %s"%("id", " #points", "group")
for idx, group in enumerate(self.groups):
print "%5s %9s %s"%(idx+1, len(self.groups[group]), group)
group_artists = self.plot_group(group, idx+ith_plot, axis)
if isinstance(group_artists, list):
artists.extend(group_artists)
else:
artists.append(group_artists)
print
return artists
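# A minimal sketch of a derived plot type (illustrative only: the class name,
# the use of logevent.datetime / logevent.duration and the axis.plot call are
# assumptions for demonstration, not part of this module):
#
#     class DurationScatterExample(BasePlotType):
#         plot_type_str = 'duration_scatter_example'
#
#         def accept_line(self, logevent):
#             # only consider log lines that carry a duration
#             return getattr(logevent, 'duration', None) is not None
#
#         def plot_group(self, group, idx, axis):
#             x = [le.datetime for le in self.groups[group]]
#             y = [le.duration for le in self.groups[group]]
#             color, marker = self.color_map(group)
#             return axis.plot(x, y, linestyle='', marker=marker,
#                              color=color, label=group)[0]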
| apache-2.0 |