repo_name | path | copies | size | content | license
---|---|---|---|---|---|
junwucs/h2o-3 | h2o-py/h2o/two_dim_table.py | 3 | 2328 | """
A two dimensional table having row and column headers.
"""
import copy
import h2o
from frame import _is_list_of_lists
class H2OTwoDimTable(object):
"""
A class representing a 2D table (for pretty printing output).
"""
def __init__(self, row_header=None, col_header=None, col_types=None,
table_header=None, raw_cell_values=None,
col_formats=None, cell_values=None, table_description=None):
self.row_header = row_header
self.col_header = col_header
self.col_types = col_types
self.table_header = table_header
self.cell_values = cell_values if cell_values else self._parse_values(raw_cell_values, col_types)
self.col_formats = col_formats
self.table_description = table_description
def show(self, header=True):
if h2o.can_use_pandas():
import pandas
pandas.options.display.max_rows = 20
print pandas.DataFrame(self.cell_values,columns=self.col_header)
return
print
if header:
print self.table_header + ":",
if self.table_description: print self.table_description
print
table = copy.deepcopy(self.cell_values)
nr=0
if _is_list_of_lists(table): nr = len(table) # only set if we truly have multiple rows... not just one long row :)
if nr > 20: # create a truncated view of the table, first/last 5 rows
trunc_table =[]
trunc_table += [ v for v in table[:5]]
trunc_table.append(["---"]*len(table[0]))
trunc_table += [v for v in table[(nr-5):]]
table = trunc_table
h2o.H2ODisplay(table, self.col_header, numalign="left", stralign="left")
def __repr__(self):
self.show()
return ""
def _parse_values(self, values, types):
if self.col_header[0] is None:
self.col_header = self.col_header[1:]
types = types[1:]
values = values[1:]
for col_index, column in enumerate(values):
for row_index, row_value in enumerate(column):
if types[col_index] == 'integer':
values[col_index][row_index] = "" if row_value is None else int(float(row_value))
elif types[col_index] in ['double', 'float', 'long']:
values[col_index][row_index] = "" if row_value is None else float(row_value)
else: # string?
continue
return zip(*values) # transpose the values! <3 splat ops
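# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# A minimal, hand-built table to illustrate the constructor and _parse_values; the
# column names and the 'string'/'double' type labels are illustrative assumptions
# inferred from the parsing code above, not from the official H2O REST schema.
# >>> tbl = H2OTwoDimTable(col_header=['model_id', 'rmse'],
# ...                      col_types=['string', 'double'],
# ...                      table_header='Leaderboard',
# ...                      raw_cell_values=[['gbm_1', 'glm_1'], ['0.31', '0.42']])
# >>> tbl.cell_values            # raw column-major strings become row tuples
# [('gbm_1', 0.31), ('glm_1', 0.42)]
# >>> tbl.show()                 # pretty-prints via pandas when available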
| apache-2.0 |
jmhsi/justin_tinker | data_science/courses/temp/structured.py | 1 | 14718 | from .imports import *
from sklearn_pandas import DataFrameMapper
from sklearn.preprocessing import LabelEncoder, Imputer, StandardScaler
from pandas.api.types import is_string_dtype, is_numeric_dtype
from sklearn.ensemble import forest
from sklearn.tree import export_graphviz
def set_plot_sizes(sml, med, big):
plt.rc('font', size=sml) # controls default text sizes
plt.rc('axes', titlesize=sml) # fontsize of the axes title
plt.rc('axes', labelsize=med) # fontsize of the x and y labels
plt.rc('xtick', labelsize=sml) # fontsize of the tick labels
plt.rc('ytick', labelsize=sml) # fontsize of the tick labels
plt.rc('legend', fontsize=sml) # legend fontsize
plt.rc('figure', titlesize=big) # fontsize of the figure title
def parallel_trees(m, fn, n_jobs=8):
return list(ProcessPoolExecutor(n_jobs).map(fn, m.estimators_))
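# Hedged usage sketch (editor's addition): parallel_trees just maps fn over the fitted
# forest's estimators_ in a process pool, so fn must be picklable (a module-level
# function, not a lambda). np, the fitted model m and the X_valid matrix are assumed
# to exist already.
# >>> def get_preds(t): return t.predict(X_valid)
# >>> preds = np.stack(parallel_trees(m, get_preds))
# >>> preds.shape                # (n_trees, n_valid_rows)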
def draw_tree(t, df, size=10, ratio=0.6, precision=0):
""" Draws a representation of a random forest in IPython.
Parameters:
-----------
t: The tree you wish to draw
df: The data used to train the tree. This is used to get the names of the features.
"""
s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,
special_characters=True, rotate=True, precision=precision)
IPython.display.display(graphviz.Source(re.sub('Tree {',
f'Tree {{ size={size}; ratio={ratio}', s)))
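# Hedged usage sketch (editor's addition): drawing one tree from a fitted forest; the
# RandomForestRegressor import and the X (DataFrame) / y data are assumptions.
# >>> from sklearn.ensemble import RandomForestRegressor
# >>> m = RandomForestRegressor(n_estimators=10, max_depth=3).fit(X, y)
# >>> draw_tree(m.estimators_[0], X, precision=2)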
def combine_date(years, months=1, days=1, weeks=None, hours=None, minutes=None,
seconds=None, milliseconds=None, microseconds=None, nanoseconds=None):
years = np.asarray(years) - 1970
months = np.asarray(months) - 1
days = np.asarray(days) - 1
types = ('<M8[Y]', '<m8[M]', '<m8[D]', '<m8[W]', '<m8[h]',
'<m8[m]', '<m8[s]', '<m8[ms]', '<m8[us]', '<m8[ns]')
vals = (years, months, days, weeks, hours, minutes, seconds,
milliseconds, microseconds, nanoseconds)
return sum(np.asarray(v, dtype=t) for t, v in zip(types, vals)
if v is not None)
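# Hedged example (editor's addition): combine_date builds a numpy datetime64 array by
# summing typed offsets; only `years` is required, the other components default to the
# start of the year.
# >>> combine_date(years=[2000, 2000], months=[3, 3], days=[11, 12])
# array(['2000-03-11', '2000-03-12'], dtype='datetime64[D]')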
def get_sample(df,n):
""" Gets a random sample of n rows from df, without replacement.
Parameters:
-----------
df: A pandas data frame, that you wish to sample from.
n: The number of rows you wish to sample.
Returns:
--------
return value: A random sample of n rows of df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
>>> get_sample(df, 2)
col1 col2
2 3 a
1 2 b
"""
idxs = sorted(np.random.permutation(len(df))[:n])
return df.iloc[idxs].copy()
def add_datepart(df, fldname, drop=True):
"""add_datepart converts a column of df from a datetime64 to many columns containing
the information from the date. This applies changes inplace.
Parameters:
-----------
df: A pandas data frame. df will gain several new columns.
fldname: A string that is the name of the date column you wish to expand.
If it is not a datetime64 series, it will be converted to one with pd.to_datetime.
drop: If true then the original date column will be removed.
Examples:
---------
>>> df = pd.DataFrame({ 'A' : pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000'], infer_datetime_format=False) })
>>> df
A
0 2000-03-11
1 2000-03-12
2 2000-03-13
>>> add_datepart(df, 'A')
>>> df
AYear AMonth AWeek ADay ADayofweek ADayofyear AIs_month_end AIs_month_start AIs_quarter_end AIs_quarter_start AIs_year_end AIs_year_start AElapsed
0 2000 3 10 11 5 71 False False False False False False 952732800
1 2000 3 10 12 6 72 False False False False False False 952819200
2 2000 3 11 13 0 73 False False False False False False 952905600
"""
fld = df[fldname]
if not np.issubdtype(fld.dtype, np.datetime64):
df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True)
targ_pre = re.sub('[Dd]ate$', '', fldname)
for n in ('Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start'):
df[targ_pre+n] = getattr(fld.dt,n.lower())
df[targ_pre+'Elapsed'] = fld.astype(np.int64) // 10**9
if drop: df.drop(fldname, axis=1, inplace=True)
def is_date(x): return np.issubdtype(x.dtype, np.datetime64)
def train_cats(df):
"""Change any columns of strings in a panda's dataframe to a column of
catagorical values. This applies the changes inplace.
Parameters:
-----------
df: A pandas dataframe. Any columns of strings will be changed to
categorical values.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category
"""
for n,c in df.items():
if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()
def apply_cats(df, trn):
"""Changes any columns of strings in df into categorical variables using trn as
a template for the category codes.
Parameters:
-----------
df: A pandas dataframe. Any columns of strings will be changed to
categorical values. The category codes are determined by trn.
trn: A pandas dataframe. When creating a category for df, it looks up
what the category codes were in trn and makes those the category codes
for df.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category {a : 1, b : 2}
>>> df2 = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['b', 'a', 'a']})
>>> apply_cats(df2, df)
col1 col2
0 1 b
1 2 b
2 3 a
now the type of col2 is category {a : 1, b : 2}
"""
for n,c in df.items():
if (n in trn.columns) and (trn[n].dtype.name=='category'):
df[n] = pd.Categorical(c, categories=trn[n].cat.categories, ordered=True)
def fix_missing(df, col, name, na_dict):
""" Fill missing data in a column of df with the median, and add a {name}_na column
which specifies if the data was missing.
Parameters:
-----------
df: The data frame that will be changed.
col: The column of data to fix by filling in missing data.
name: The name of the new filled column in df.
na_dict: A dictionary mapping column names to the fill value to use for that column. If
name is not a key of na_dict, the median will fill any missing data. Also,
if name is not a key of na_dict and there is no missing data in col, then
no {name}_na column is created.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})
>>> df
col1 col2
0 1 5
1 nan 2
2 3 2
>>> fix_missing(df, df['col1'], 'col1', {})
>>> df
col1 col2 col1_na
0 1 5 False
1 2 2 True
2 3 2 False
>>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})
>>> df
col1 col2
0 1 5
1 nan 2
2 3 2
>>> fix_missing(df, df['col2'], 'col2', {})
>>> df
col1 col2
0 1 5
1 nan 2
2 3 2
>>> df = pd.DataFrame({'col1' : [1, np.NaN, 3], 'col2' : [5, 2, 2]})
>>> df
col1 col2
0 1 5
1 nan 2
2 3 2
>>> fix_missing(df, df['col1'], 'col1', {'col1' : 500})
>>> df
col1 col2
0 1 5
1 500 2
2 3 2
"""
if is_numeric_dtype(col):
if pd.isnull(col).sum() or (name in na_dict):
df[name+'_na'] = pd.isnull(col)
filler = na_dict[name] if name in na_dict else col.median()
df[name] = col.fillna(filler)
na_dict[name] = filler
return na_dict
def numericalize(df, col, name, max_n_cat):
""" Changes the column col from a categorical type to it's integer codes.
Parameters:
-----------
df: A pandas dataframe. df[name] will be filled with the integer codes from
col.
col: The column you wish to convert to its integer category codes.
name: The column name you wish to insert into df. This column will hold the
integer codes.
max_n_cat: If col has no more than max_n_cat categories, it is left as a
category (so it can later be one-hot encoded) rather than converted to
integer codes. If max_n_cat is None, then col will always be converted.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category { a : 1, b : 2}
>>> numericalize(df, df['col2'], 'col3', None)
col1 col2 col3
0 1 a 1
1 2 b 2
2 3 a 1
"""
if not is_numeric_dtype(col) and ( max_n_cat is None or col.nunique()>max_n_cat):
df[name] = col.cat.codes+1
def scale_vars(df, mapper):
warnings.filterwarnings('ignore', category=sklearn.exceptions.DataConversionWarning)
if mapper is None:
map_f = [([n],StandardScaler()) for n in df.columns if is_numeric_dtype(df[n])]
mapper = DataFrameMapper(map_f).fit(df)
df[mapper.transformed_names_] = mapper.transform(df)
return mapper
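# Hedged usage sketch (editor's addition): scale_vars fits a DataFrameMapper over the
# numeric columns on the first call and reuses it when passed back in, which is how
# proc_df shares training-time scaling with a validation set. train_df/valid_df are
# assumed, pre-existing dataframes.
# >>> mapper = scale_vars(train_df, None)    # fit mean/std on the training data
# >>> _ = scale_vars(valid_df, mapper)       # apply the same scaling to validation data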
def proc_df(df, y_fld, skip_flds=None, do_scale=False, na_dict=None,
preproc_fn=None, max_n_cat=None, subset=None, mapper=None):
""" proc_df takes a data frame df and splits off the response variable, and
changes the df into an entirely numeric dataframe.
Parameters:
-----------
df: The data frame you wish to process.
y_fld: The name of the response variable
skip_flds: A list of fields to drop from df.
do_scale: If True, standardizes (zero mean, unit variance) each numeric column in df.
na_dict: a dictionary of na columns to add. Na columns are also added if there
are any missing values.
preproc_fn: A function that gets applied to df.
max_n_cat: The maximum number of categories to break into dummy values, instead
of integer codes.
subset: Takes a random subset of size subset from df.
mapper: If do_scale is True, an optional DataFrameMapper from a previous call whose
training-time statistics (mean and standard deviation) are reused; if None, a new mapper is fitted.
Returns:
--------
[x, y, nas, mapper(optional)]:
x: x is the transformed version of df. x will not have the response variable
and is entirely numeric.
y: y is the response variable
nas: a dictionary of the na columns it created and the associated fill value (median).
mapper: A DataFrameMapper which stores the mean and standard deviation of the corresponding continuous
variables, which can then be used for the same scaling at test time.
Examples:
---------
>>> df = pd.DataFrame({'col1' : [1, 2, 3], 'col2' : ['a', 'b', 'a']})
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
note the type of col2 is string
>>> train_cats(df)
>>> df
col1 col2
0 1 a
1 2 b
2 3 a
now the type of col2 is category { a : 1, b : 2}
>>> x, y, nas = proc_df(df, 'col1')
>>> x
col2
0 1
1 2
2 1
>>> data = pd.DataFrame({'pet': ["cat", "dog", "dog", "fish", "cat", "dog", "cat", "fish"],
...                      'children': [4., 6, 3, 3, 2, 3, 5, 4],
...                      'salary': [90, 24, 44, 27, 32, 59, 36, 27]})
>>> mapper = DataFrameMapper([('pet', LabelBinarizer()),
...                           (['children'], StandardScaler())])
>>> np.round(mapper.fit_transform(data.copy()), 2)
array([[ 1.  ,  0.  ,  0.  ,  0.21],
       [ 0.  ,  1.  ,  0.  ,  1.88],
       [ 0.  ,  1.  ,  0.  , -0.63],
       [ 0.  ,  0.  ,  1.  , -0.63],
       [ 1.  ,  0.  ,  0.  , -1.46],
       [ 0.  ,  1.  ,  0.  , -0.63],
       [ 1.  ,  0.  ,  0.  ,  1.04],
       [ 0.  ,  0.  ,  1.  ,  0.21]])
"""
if not skip_flds: skip_flds=[]
if subset: df = get_sample(df,subset)
df = df.copy()
if preproc_fn: preproc_fn(df)
y = df[y_fld].values
df.drop(skip_flds+[y_fld], axis=1, inplace=True)
if na_dict is None: na_dict = {}
for n,c in df.items(): na_dict = fix_missing(df, c, n, na_dict)
if do_scale: mapper = scale_vars(df, mapper)
for n,c in df.items(): numericalize(df, c, n, max_n_cat)
res = [pd.get_dummies(df, dummy_na=True), y, na_dict]
if do_scale: res = res + [mapper]
return res
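# Hedged end-to-end sketch (editor's addition): a typical pipeline built from these
# helpers. The column names and the RandomForestRegressor settings are illustrative
# assumptions, not part of the library.
# >>> from sklearn.ensemble import RandomForestRegressor
# >>> raw = pd.DataFrame({'price': [10., 12., 9.],
# ...                     'color': ['red', 'blue', 'red'],
# ...                     'qty':   [1, np.nan, 3]})
# >>> train_cats(raw)                        # strings -> ordered categories, in place
# >>> X, y, nas = proc_df(raw, 'price')      # numeric X, response y, median fills in nas
# >>> m = RandomForestRegressor(n_estimators=10).fit(X, y)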
def rf_feat_importance(m, df):
return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}
).sort_values('imp', ascending=False)
def set_rf_samples(n):
""" Changes Scikit learn's random forests to give each tree a random sample of
n random rows.
"""
forest._generate_sample_indices = (lambda rs, n_samples:
forest.check_random_state(rs).randint(0, n_samples, n))
def reset_rf_samples():
""" Undoes the changes produced by set_rf_samples.
"""
forest._generate_sample_indices = (lambda rs, n_samples:
forest.check_random_state(rs).randint(0, n_samples, n_samples))
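# Hedged usage sketch (editor's addition): the monkey-patch above relies on the private
# sklearn.ensemble.forest API of older scikit-learn releases; newer versions renamed
# _generate_sample_indices and changed its signature, so this is version-dependent.
# >>> set_rf_samples(20000)                  # each tree now bootstraps 20k random rows
# >>> m = RandomForestRegressor(n_estimators=40, n_jobs=-1).fit(X, y)
# >>> reset_rf_samples()                     # restore the default bootstrap-of-all-rows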
def get_nn_mappers(df, cat_vars, contin_vars):
# Replace nulls with 0 for continuous, "" for categorical.
for v in contin_vars: df[v] = df[v].fillna(df[v].max()+100,)
for v in cat_vars: df[v].fillna('#NA#', inplace=True)
# list of tuples, containing variable and instance of a transformer for that variable
# for categoricals, use LabelEncoder to map to integers. For continuous, standardize
cat_maps = [(o, LabelEncoder()) for o in cat_vars]
contin_maps = [([o], StandardScaler()) for o in contin_vars]
return DataFrameMapper(cat_maps).fit(df), DataFrameMapper(contin_maps).fit(df)
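# Hedged usage sketch (editor's addition): get_nn_mappers returns two fitted
# DataFrameMappers, one label-encoding the categorical columns and one standardizing
# the continuous ones. The column names below are illustrative.
# >>> cat_map, contin_map = get_nn_mappers(df, ['store', 'dow'], ['sales', 'customers'])
# >>> cats = cat_map.transform(df)           # integer codes, one column per cat_var
# >>> conts = contin_map.transform(df)       # standardized floats, one column per contin_var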
| apache-2.0 |
NMGRL/pychron | pychron/mv/lumen_detector.py | 2 | 13744 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
from __future__ import absolute_import
from chaco.data_range_1d import DataRange1D
from chaco.default_colormaps import hot
from numpy import invert, zeros_like, asarray, max, copy, ones_like, zeros, uint8, average, ravel_multi_index
from skimage.color import rgb2gray, gray2rgb
from skimage.draw import circle, polygon
# ============= local library imports ==========================
from skimage.feature import peak_local_max
from skimage.filters import gaussian
from pychron.mv.locator import Locator
def calc_area(a):
b = asarray(a, dtype=bool)
return b.sum()
class LumenDetector(Locator):
threshold = 25
pxpermm = 23
mask_kind = 'Hole'
beam_radius = 0
custom_mask_radius = 0
hole_radius = 0
_cached_mask_value = None
grain_measuring = False
active_targets = None
_target = None
def __init__(self, *args, **kw):
super(LumenDetector, self).__init__(*args, **kw)
self._color_mapper = hot(DataRange1D(low=0, high=1))
def reset(self):
self._target = None
def draw_targets(self, *args, **kw):
self._draw_targets(*args, **kw)
def get_value(self, src, dim, threshold=10, area_threshold=10):
"""
if scaled is True
return sum of all pixels in masked area / (masked area *255)
@param src:
@param scaled:
@return:
"""
pixel_depth = self.pixel_depth
m = self._mask(src)
if not len(src.shape) == 2:
gsrc = rgb2gray(src)
tt = threshold / pixel_depth
pd = 1
else:
gsrc = src
tt = threshold / 100 * pixel_depth
pd = pixel_depth
src[src <= threshold] = 0
if not self._target:
targets = self.find_targets(None, src, dim, search={'n': 2})
marea = m.sum()
area = 0
if targets:
self.debug('found targets={}'.format(len(targets)))
for t in targets:
if t.area > marea * 0.15:
if t.area > area:
area = t.area
self._target = t
else:
area = marea
if not area:
area = marea
else:
area = self._target.area
targets = [self._target]
if self._target:
points = self._target.poly_points
pmaskrr, pmaskcc = polygon(points[:, 0], points[:, 1], gsrc.shape)
tsrc = gsrc[pmaskrr, pmaskcc]
else:
tsrc = gsrc[gsrc > tt]
# n = tsrc.shape[0]
v = 0
if area:
ss = tsrc.sum()
v = ss / (area * pd)
self.debug('v={}, sum={}, area={}, pd={}, mask={}'.format(v, ss, area, pd, m.sum()))
return src, v, targets
def find_targets(self, image, src, dim, mask=False, search=None):
targets = self._find_targets(image, src, dim,
filter_targets=False,
inverted=True,
convexity_filter=0.75,
mask=mask, search=search)
self.active_targets = None
if targets:
targets = self._filter(targets, self._target_near_center, src)
if targets:
self.active_targets = targets
if image is not None:
self._draw_targets(image.source_frame, targets)
return targets
def find_lum_peak(self, lum, dim, mask_dim, min_distance=5, blur=1):
pixel_depth = self.pixel_depth
if self.grain_measuring:
targets = self.active_targets
if targets is not None:
self.debug('active targets={}'.format(len(targets)))
else:
self.debug('no active targets')
else:
targets = self.find_targets(None, lum, dim, mask=mask_dim, search={'n': 2})
if targets:
self.debug('found targets={}'.format(len(targets)))
src = gaussian(lum, blur) * pixel_depth
# mask = self._mask(lum)
h, w = lum.shape[:2]
# pts = peak_local_max(src, min_distance=min_distance, num_peaks=10)
pt, px, py, sat = None, None, None, None  # defaults so the return below works when no targets are found
peak_img = zeros((h, w), dtype=uint8)
if targets:
target = targets[0]
px, py = target.centroid
self._draw_targets(src, targets)
peak_img[circle(py, px, min_distance)] = 255
ilum = lum[target.mask].sum()
area = (target.area + target.pactual / 2)
# else:
# ilum = lum.sum()
# area = mask.sum()
sat = ilum / (area * pixel_depth)
pt = px - w / 2, py - h / 2, sat
# if pts.shape[0]:
# idx = tuple(pts.T)
# intensities = src.flat[ravel_multi_index(idx, src.shape)]
#
# try:
# x, y = average(pts, axis=0, weights=intensities)
# if pt is None:
# pt = x - w / 2, y - h / 2, sorted(intensities)[-1]
# px, py = x, y
#
# peak_img[circle(y, x, min_distance)] = 255
#
# except ZeroDivisionError:
# pass
return pt, px, py, peak_img, sat, src
def get_scores(self, lum, pixel_depth=None):
if pixel_depth is None:
pixel_depth = self.pixel_depth
mask = self._mask(lum)
v = lum.sum()
# x, y = peak_local_max(lum, min_distance=5, num_peaks=1)[0]
# h, w = lum.shape[:2]
# distance = ((x - w / 2.) ** 2 + (y - h / 2.) ** 2) ** 0.5
distance = 1
try:
score_density = v / (calc_area(lum) * distance)
except ZeroDivisionError:
score_density = 0
score_saturation = v / (mask.sum() * pixel_depth)
return score_density, score_saturation, lum
def _mask(self, src, radius=None):
if radius is None:
radius = self.mask_radius
return super(LumenDetector, self)._mask(src, radius)
@property
def mask_radius(self):
if self.mask_kind == 'Hole':
d = self.hole_radius
elif self.mask_kind == 'Beam':
d = max(0.1, self.beam_radius * 1.1)
else:
d = self.custom_mask_radius
return d
# ============= EOF =============================================
#
# def _lum(self, src):
# # threshold = self.threshold
# # src[src < threshold] = 0
# mask = self._mask(src)
#
# return mask
# def polygon_clip(rp, cp, r0, c0, r1, c1):
# """Clip a polygon to the given bounding box.
# Parameters
# ----------
# rp, cp : (N,) ndarray of double
# Row and column coordinates of the polygon.
# (r0, c0), (r1, c1) : double
# Top-left and bottom-right coordinates of the bounding box.
# Returns
# -------
# r_clipped, c_clipped : (M,) ndarray of double
# Coordinates of clipped polygon.
# Notes
# -----
# This makes use of Sutherland-Hodgman clipping as implemented in
# AGG 2.4 and exposed in Matplotlib.
# """
# poly = path.Path(vstack((rp, cp)).T, closed=True)
# clip_rect = transforms.Bbox([[r0, c0], [r1, c1]])
# poly_clipped = poly.clip_to_bbox(clip_rect).to_polygons()[0]
#
# # This should be fixed in matplotlib >1.5
# if all(poly_clipped[-1] == poly_clipped[-2]):
# poly_clipped = poly_clipped[:-1]
#
# return poly_clipped[:, 0], poly_clipped[:, 1]
#
# #
# def polygon_perimeter(cr, cc, shape=None, clip=False):
# """Generate polygon perimeter coordinates.
# Parameters
# ----------
# cr : (N,) ndarray
# Row (Y) coordinates of vertices of polygon.
# cc : (N,) ndarray
# Column (X) coordinates of vertices of polygon.
# shape : tuple, optional
# Image shape which is used to determine maximum extents of output pixel
# coordinates. This is useful for polygons which exceed the image size.
# By default the full extents of the polygon are used.
# clip : bool, optional
# Whether to clip the polygon to the provided shape. If this is set
# to True, the drawn figure will always be a closed polygon with all
# edges visible.
# Returns
# -------
# pr, pc : ndarray of int
# Pixel coordinates of polygon.
# May be used to directly index into an array, e.g.
# ``img[pr, pc] = 1``.
# """
#
# if clip:
# if shape is None:
# raise ValueError("Must specify clipping shape")
# clip_box = array([0, 0, shape[0] - 1, shape[1] - 1])
# else:
# clip_box = array([min(cr), min(cc),
# max(cr), max(cc)])
#
# # Do the clipping irrespective of whether clip is set. This
# # ensures that the returned polygon is closed and is an array.
# cr, cc = polygon_clip(cr, cc, *clip_box)
#
# cr = round(cr).astype(int)
# cc = round(cc).astype(int)
#
# # Construct line segments
# pr, pc = [], []
# for i in range(len(cr) - 1):
# line_r, line_c = line(cr[i], cc[i], cr[i + 1], cc[i + 1])
# pr.extend(line_r)
# pc.extend(line_c)
#
# pr = asarray(pr)
# pc = asarray(pc)
#
# if shape is None:
# return pr, pc
# else:
# return _coords_inside_image(pr, pc, shape)
#
# class PolygonLocator:
# # def segment(self, src):
# # markers = threshold_adaptive(src, 10)
# # # n = markers[:].astype('uint8')
# # # n = markers.astype('uint8')
# # # n[markers] = 255
# # # n[not markers] = 1
# # # markers = n
# #
# # # elmap = sobel(image, mask=image)
# # elmap = canny(src, sigma=1)
# # wsrc = watershed(elmap, markers, mask=src)
# #
# # return invert(wsrc)
# block_size = 20
#
# # def _preprocess(self, src):
# # markers = threshold_adaptive(src, self.block_size)
# #
# # # n = markers[:].astype('uint8')
# # n = markers.astype('uint8')
# # n[markers] = 255
# # n[invert(markers)] = 1
# # markers = n
# #
# # # elmap = sobel(image, mask=image)
# # elmap = canny(src, sigma=1)
# # wsrc = watershed(elmap, markers, mask=src)
# # return invert(wsrc)
# #
# # def find_targets(self, src):
# # frm = grayspace(src) * 255
# # src = frm.astype('uint8')
# #
# # src = self._preprocess(src)
# #
# # # for i, contour in enumerate(find_contours(src, 0)):
# # # coords = approximate_polygon(contour, tolerance=0)
# # # x, y = coords.T
# # # # print i, x,y
# # # # rr, cc = polygon_perimeter(y, x)
# # # rr, cc = polygon(y, x)
# # #
# # # src[cc, rr] = 100
# #
# # print 'found contours'
# # lsrc = label(src)
# # r, c = src.shape
# # ts = []
# # for i, rp in enumerate(regionprops(lsrc)):
# # cy, cx = rp.centroid
# # print 'region prop', i, cx, cy
# # # cy += 1
# # # cx += 1
# # tx, ty = cx - c / 2., cy - r / 2.
# # src[cy, cx] = 175
# # t = int(tx), int(ty)
# # if t not in ts:
# # ts.append((rp, t))
# # return ts, src
#
# def find_best_target(self, osrc):
# targetxy = None
# ts, src = self.find_targets(osrc)
# if ts:
# scores = []
# if len(ts) > 1:
# for rp, center in ts:
# score = self.calculate_target_score(osrc, rp)
# scores.append((score, center))
#
# targetxy = sorted(scores)[-1][1]
# else:
# targetxy = ts[0][1]
#
# return targetxy, src
#
# def calculate_target_score(self, src, rp):
# rr, cc = rp.coords.T
# region = src[rr, cc]
# score = region.sum() / float(rp.area)
# return score
# def find_best_target(self, src):
# p = PolygonLocator()
# targetxy, src = p.find_best_target(src)
#
# return targetxy, src
#
# def lum(self, src):
# lum, mask = self._lum(src)
# return lum, mask
#
# def get_value(self, src, scaled=True):
# """
#
# if scaled is True
# return sum of all pixels in masked area / (masked area *255)
#
# @param src:
# @param scaled:
# @return:
# """
#
# lum, mask = self._lum(src)
#
# v = lum.sum()
#
# if scaled:
# v /= (mask.sum() * 255.)
# # print lum.sum(), v
# return src, v
#
| apache-2.0 |
nschaetti/nsNLP | tools/ResultManager.py | 1 | 27077 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Auteur : Nils Schaetti <[email protected]>
# Date : 01.02.2017 17:59:05
# Lieu : Nyon, Suisse
#
# This file is part of the nsNLP Project.
# The nsNLP Project is a set of free software:
# you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nsNLP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#
#
import os
import codecs
import pickle
import datetime
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from sklearn.utils.extmath import cartesian
import csv
import scipy
import sys
import LatexPlots
# Manage and save results
class ResultManager(object):
"""
Manage and save results
"""
# Constructor
def __init__(self, output_dir, name, description, params_dict, n_samples, k=10, verbose=2, nan=False):
"""
Constructor
:param params_dict:
"""
# Properties
self._output_dir = output_dir
self._name = name
self._description = description
self._params_dict = params_dict
self._n_samples = n_samples
self._k = k
self._fold = 0
self._sample = 0
self._n_dim = len(params_dict.keys()) + 2
self._verbose = verbose
self._objects = list()
self._nan = nan
# Param to dimension
self._param2dim = dict()
self._value2pos = dict()
for index, param in enumerate(params_dict.keys()):
self._param2dim[param] = index
self._value2pos[param] = dict()
for index2, value in enumerate(params_dict[param]):
if type(value) == list:
self._value2pos[param][u"{}".format(value)] = index2
else:
self._value2pos[param][value] = index2
# end if
# end for
# end for
# Current parameter value
self._pos = dict()
for param in params_dict.keys():
self._pos[param] = params_dict[param][0]
# end for
# Generate result matrix
self._result_matrix = self._generate_matrix()
# Create directory
self._xp_dir, self._obj_dir = self._create_directory()
# Open the output file
self._output_file = self._open_log_file(os.path.join(self._xp_dir, u"output.log"))
# Write log header
self._write_log_header()
# Open CSV result file
self._csv_results = self._create_csv_results(os.path.join(self._xp_dir, u"output.csv"))
# Log
self._write_log(u"Starting experiment {}".format(name), log_level=0)
self._write_log(u"Result matrix is of dimension {}".format(self._n_dim), log_level=0)
self._write_log(u"Result matrix is of shape {}".format(self._result_matrix.shape), log_level=0)
# end __init__
###########################################
# Public
###########################################
# Change parameter state
def set_state(self, pos):
"""
Change parameter state
:param pos: Dictionary mapping parameter names to their new values.
:return:
"""
if self._verbose:
self._write_log(u"\tChanging param state to {}".format(pos), log_level=1)
# end if
# Params
for param in pos.keys():
self._pos[param] = pos[param]
# end for
# end set_state
# Change sample state
def set_sample_state(self, n_sample):
"""
Change sample state
:param n_sample:
:return:
"""
if self._verbose:
self._write_log(u"\t\tChanging sample state to {}".format(n_sample), log_level=2)
# end if
self._sample = n_sample
# end set_sample_state
# Change fold state
def set_fold_state(self, k):
"""
Change fold state
:param k:
:return:
"""
if self._verbose:
self._write_log(u"\t\t\tChanging fold state to {}".format(k), log_level=3)
# end if
self._fold = k
# end set_fold_state
# Save result
def add_result(self, success_rate, last_fold=False):
"""
Save result
:param success_rate:
:return:
"""
# Element pos
element_pos = [0] * self._n_dim
# For each param
for param in self._param2dim.keys():
# Dim
dim = self._param2dim[param]
# Pos of value
if type(self._pos[param]) is list:
pos = self._value2pos[param][u"{}".format(self._pos[param])]
else:
pos = self._value2pos[param][self._pos[param]]
# end if
# Set
element_pos[dim] = pos
# end for
# Sample
element_pos[-2] = self._sample
# Fold
element_pos[-1] = self._fold
# Set
self._result_matrix[tuple(element_pos)] = success_rate
# Verbose
if self._verbose:
# Write fold success rate
self._write_log(u"\t\t\t\tSuccess rate {}".format(success_rate), log_level=3)
# Last fold?
if self._fold + 1 == self._k or last_fold:
k_pos = element_pos
k_pos[-1] = slice(None)
self._write_log(u"\t\t\t{}-fold success rate {}".format(self._k, np.nanmean(self._result_matrix[tuple(k_pos)])), log_level=2)
# Last sample?
if self._sample + 1 == self._n_samples:
n_pos = k_pos
n_pos[-2] = slice(None)
# Folds perfs
folds_perfs = np.nanmean(self._result_matrix[tuple(n_pos)], axis=-1).flatten()
# Print
self._write_log(u"\t\t{} samples success rate {} +- {}".format(self._n_samples, np.nanmean(folds_perfs), np.nanstd(folds_perfs)), log_level=1)
self._write_log(u"\t\tMax sample success rate {}".format(np.max(folds_perfs)), log_level=1)
# end if
# end if
# end if
# Write in CSV
self._write_csv_result(success_rate)
# end add_result
# Save results
def save(self):
"""
Save results
:return:
"""
# Save overall success rate
self._write_log(u"\tOverall success rate: {}".format(np.nanmean(self._result_matrix)), log_level=0)
# Save result matrix
self.save_object(u"result_matrix", self._result_matrix)
# Save global data
self._save_global()
# For each param
for param in self._params_dict.keys():
# If there is more than
# one value.
if len(self._params_dict[param]) > 1:
self._save_param_data(param)
# end if
# end for
# end save
# Save object
def save_object(self, name, obj, info=""):
"""
Add object
:param name: Object's name
:param obj: Object
:return:
"""
# Write
with open(os.path.join(self._obj_dir, name + u".p"), 'wb') as f:
pickle.dump(obj=obj, file=f)
# end with
# Infos?
if info != "":
with open(os.path.join(self._obj_dir, name + u".txt"), 'w') as f:
f.write(info + u"\n")
# end with
# end if
# end add_object
# Write log
def write(self, text, log_level):
"""
Write log
:param text:
:param log_level:
:return:
"""
self._write_log(text, log_level)
# end write
###########################################
# Private
###########################################
# Write result in CSV
def _write_csv_result(self, success_rate):
"""
Write result in CSV
:param success_rate:
:return:
"""
# Row list
row_list = list()
# First to last param
for param_index in range(len(self._params_dict.keys())):
# For each param
for param in self._params_dict.keys():
if param_index == self._param2dim[param]:
row_list.append(self._pos[param])
# end if
# end for
# end for
# Add sample and fold
row_list.append(self._sample)
row_list.append(self._fold)
# Add result
row_list.append(success_rate)
# Write
self._csv_results.writerow(row_list)
# end _write_csv_result
# Write log header
def _write_log_header(self):
"""
Write log header
:return:
"""
self._write_log(u"Arguments : {}".format(sys.argv), log_level=0)
self._write_log(u"Experience name : {}".format(self._name), log_level=0)
self._write_log(u"Description : {}".format(self._description), log_level=0)
self._write_log(u"Date : {}".format(datetime.datetime.utcnow()), log_level=0)
# end _write_log_header
# Create directory
def _create_directory(self):
"""
Create the experience directory
:return:
"""
# XP directory
self._xp_dir = os.path.join(self._output_dir, self._name)
# Object directory
self._obj_dir = os.path.join(self._xp_dir, u"objects")
# Create if necessary
if not os.path.exists(self._xp_dir):
os.mkdir(self._xp_dir)
# end if
# Create if necessary
if not os.path.exists(self._obj_dir):
os.mkdir(self._obj_dir)
# end if
return self._xp_dir, self._obj_dir
# end _create_directory
# Write log
def _write_log(self, text, log_level):
"""
Write log
:param text:
:return:
"""
if log_level <= self._verbose:
print(text)
# end if
self._output_file.write(text + u"\n")
# end _write_log
# Open the output log file
def _open_log_file(self, filename):
"""
Open the output log file
:param filename:
:return:
"""
return codecs.open(filename, 'w', encoding='utf-8')
# end _open_log_file
# Save global data
def _save_global(self):
"""
Save global data
:return:
"""
# Result by samples
sample_results = np.nanmean(self._result_matrix, axis=-1)
# Save
self._save_histogram(os.path.join(self._xp_dir, u"overall_results.png"), self._result_matrix.flatten(),
u"Overall results", u"Result", u"Proportion")
# Save result by samples
self._save_histogram(os.path.join(self._xp_dir, u"samples_results.png"), sample_results.flatten(),
u"Samples results", u"Result", u"Proportion")
# Show max samples results
max_result, max_std, max_params = self._get_max_parameters()
self._write_log(u"\tBest perf with {} +- {} : {}".format(max_result, max_std, max_params), log_level=0)
# Show overall samples results
max_result, max_std, max_params = self._get_max_parameters(samples=False)
self._write_log(u"\tBest perf with {} +- {} : {}".format(max_result, max_std, max_params), log_level=0)
# end _save_global
# Get max parameters
def _get_max_parameters(self, samples=True, select_dim=None, select_value=None):
"""
Get parameters with maximum results
:return:
"""
# Max values
max_result = 0
max_pos = ()
max_std = 0
# Remove
if samples:
n_remove = 1
else:
n_remove = 2
# end if
# Array of positions
pos_array = list()
# For each dimension
for dim in range(self._n_dim - n_remove):
if select_dim is not None and select_value is not None and dim == select_dim:
pos_array.append([select_value])
else:
# Size of this dim
pos_array.append(np.arange(0, self._result_matrix.shape[dim]))
# end if
# end for
# Cartesian product
cart_product = cartesian(np.array(pos_array))
# For each pos
for pos in cart_product:
# Position
pos_tuple = pos.tolist()
# Get result
pos_result = np.nanmean(self._result_matrix[tuple(pos_tuple)])
# Max?
if pos_result > max_result:
max_result = pos_result
max_pos = pos_tuple
max_std = np.nanstd(self._result_matrix[tuple(pos_tuple)])
# end if
# end for
# Return max, std, and parameters
return max_result, max_std, self._pos_to_dict(max_pos)
# end _get_max_parameters
# Position to dictionary
def _pos_to_dict(self, position):
"""
Position to dictionary
:return:
"""
# Max. pos
max_pos = {}
# For each dimension
for index, pos in enumerate(position):
# For each parameters
for param in self._params_dict.keys():
if index == self._param2dim[param]:
for param_value in self._params_dict[param]:
the_param_value = param_value
if type(the_param_value) is list:
the_param_value = u"{}".format(the_param_value)
# end if
if pos == self._value2pos[param][the_param_value]:
max_pos[param] = the_param_value
# end if
# end for
# end if
# end for
# end for
# Sample
if len(position) == self._n_dim-1:
max_pos['samples'] = position[-1]
# end if
return max_pos
# end _pos_to_dict
# Save param data
def _save_param_data(self, param, sub_dir=u"", pos_dim=None, pos_value=None):
"""
Save param data
:return:
"""
# Get data directory
if sub_dir != u"":
param_path = os.path.join(sub_dir, param)
else:
param_path = os.path.join(self._xp_dir, param)
# end if
print(param_path)
# Create directory
if not os.path.exists(param_path):
os.mkdir(param_path)
# end if
# Value type
value_type = 'numeric'
# Open the parameter report
param_report = codecs.open(os.path.join(param_path, u"report.txt"), 'w', encoding='utf-8')
# Param dimension
dim = self._param2dim[param]
# Possible values
values = self._params_dict[param]
# Number of values
n_values = len(values)
# Values samples
value_samples = dict()
value2sample = dict()
# Plot value
plot_results = np.array([])
plot_std = np.array([])
# Perf per values
value2perf = dict()
value2std = dict()
# All samples
all_samples = np.array([])
# List?
value_list = False
# Sample per values
for index, value in enumerate(values):
# Value type
if type(value) == str or type(value) == unicode or type(value) is list:
value_type = 'str'
# end if
# List
the_value = value
if type(the_value) is list:
value_list = True
the_value = u"{}".format(the_value)
# end if
# All range
position_vector = [slice(None)] * self._n_dim
# Restrict to upper level (if needed)
if pos_dim is not None and pos_value is not None:
position_vector[pos_dim] = pos_value
# end if
# Value position
if type(the_value) is list:
value_pos = self._value2pos[param][u"{}".format(the_value)]
else:
value_pos = self._value2pos[param][the_value]
# end if
# Set index
position_vector[dim] = value_pos
# Samples
samples = self._result_matrix[tuple(position_vector)]
# Samples perfs
samples_results = np.nanmean(samples, axis=-1).flatten()
# Save histogram for this value
self._save_histogram(os.path.join(param_path, u"hist_" + unicode(the_value) + u".png"), samples_results,
u"Histogram " + unicode(the_value), u"Result", u"%")
# Add to dict
value_samples[the_value] = np.ascontiguousarray(samples)
value_samples[the_value].shape = (-1, self._k)
value2sample[the_value] = samples_results
# Add to plot
plot_results = np.append(plot_results, np.nanmean(samples_results))
plot_std = np.append(plot_std, np.nanstd(samples_results))
# Value to perf
value2perf[the_value] = np.nanmean(samples_results)
value2std[the_value] = np.nanstd(samples_results)
# Write best perf in the report
max_result, max_std, max_params = self._get_max_parameters(samples=True, select_dim=dim, select_value=value_pos)
param_report.write(u"Best perf with {} +- {} : {}\n\n".format(max_result, max_std, max_params))
# Add to all samples
if all_samples.size == 0:
all_samples = samples_results
else:
all_samples = np.vstack((all_samples, samples_results))
# end if
# Add information with other params if needed
if pos_dim is None and pos_value is None:
for sub_param in self._params_dict.keys():
if param != sub_param and len(self._params_dict[sub_param]) > 1:
# Path
sub_param_path = os.path.join(param_path, unicode(the_value))
# Create directory
if not os.path.exists(sub_param_path):
os.mkdir(sub_param_path)
# end if
# Recursive call!
self._save_param_data(sub_param, sub_dir=sub_param_path, pos_dim=self._param2dim[param],
pos_value=self._value2pos[param][the_value])
# end if
# end for
# end if
# end for
# Save the plot
if value_type == 'numeric':
self._save_plot(os.path.join(param_path, u"plot.png"), values, plot_results, plot_std,
u"Results vs {}".format(param), param, u"Results")
else:
# Create data values
samples_per_values = np.zeros((all_samples.shape[1], all_samples.shape[0]))
for i in np.arange(0, all_samples.shape[1]):
samples_per_values[i, :] = all_samples[:, i].flatten()
# end for
# If values are list
value_labels = values
if value_list:
labels = list()
for label_entry in values:
labels.append(u"{}".format(label_entry))
# end for
value_labels = labels
# end if
# Boxplot
self._save_boxplot(os.path.join(param_path, u"plot.png"), samples_per_values, value_labels,
u"Results vs {}".format(param), param, u"Results")
# end if
# Write param CSV
self._write_param_csv(os.path.join(param_path, u"samples.csv"), value2sample)
# Write param tests
self._write_param_tests(os.path.join(param_path, u"t-tests.csv"), value_samples)
# end _save_param_data
# Write param tests
def _write_param_tests(self, filename, value2samples):
"""
Write param tests
:param filename:
:param value2samples:
:return:
"""
# Values
values = value2samples.keys()
# Create CSV
c = csv.writer(open(filename, 'wb'))
# Write header
c.writerow([u""] + values)
# T-tests values
t_tests = dict()
# For each value
for value1 in values:
t_tests[value1] = dict()
for value2 in values:
if value1 != value2:
value1_samples = value2samples[value1].flatten()
value2_samples = value2samples[value2].flatten()
t_tests[value1][value2] = scipy.stats.ttest_rel(value1_samples[np.isfinite(value1_samples)],
value2_samples[np.isfinite(value2_samples)]).pvalue
else:
t_tests[value1][value2] = 0.0
# end if
# end for
# end for
# For each value
for value1 in values:
ttest_row = [value1]
for value2 in values:
ttest_row.append(t_tests[value1][value2])
# end for
c.writerow(ttest_row)
# end for
# end _write_param_tests
# Write param CSV
def _write_param_csv(self, filename, value2samples):
"""
Write param CSV
:param filename:
:param value2samples:
:return:
"""
# Values
values = value2samples.keys()
# Create CSV
c = csv.writer(open(filename, 'wb'))
# Write header
c.writerow(values)
# For each sample
for index in range(len(value2samples[values[0]])):
sample_row = list()
for value in values:
sample_row.append(value2samples[value][index])
# end for
c.writerow(sample_row)
# end for
# end _write_param_csv
# Save plot
def _save_plot(self, filename, x, y, data_std, title, xlabel, ylabel):
"""
Save plot
:param filename:
:param x:
:param y:
:param data_std:
:param title:
:param xlabel:
:param ylabel:
:return:
"""
# Save figure
plt.figure()
plt.errorbar(x, y, yerr=data_std)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig(filename)
plt.close()
# Save Latex plot
self._save_plot_latex(filename + u".tex", x, y, data_std, title, xlabel, ylabel)
# end _save_plot
# Save plot as Latex file
def _save_plot_latex(self, filename, x, y, yerr, title, xlabel=u"", ylabel=u""):
"""
Save plot as Latex file
:param filename:
:param data:
:param title:
:param xlabel:
:param ylabel:
:return:
"""
# No underscore
title = title.replace(u"_", u"-")
xlabel = xlabel.replace(u"_", u"-")
ylabel = ylabel.replace(u"_", u"-")
# Latex template
latex_template = LatexPlots.latex_start + LatexPlots.latex_plot + LatexPlots.latex_end
# Latex data
latex_data = u""
# Data
for index, x_pos in enumerate(x):
latex_data += u"({}, {}) +- (0.{})\n".format(x_pos, y[index], yerr[index])
# end for
# Write
with codecs.open(filename, 'w', encoding='utf-8') as f:
f.write(latex_template % (title, title, xlabel, ylabel, latex_data, u""))
# end with
# end _save_plot_latex
# Save boxplot
def _save_boxplot(self, filename, data, labels, title, xlabel=u"", ylabel=u""):
"""
Save boxplot
:param filename:
:param data:
:param title:
:param xlabel:
:param ylabel:
:return:
"""
plt.boxplot(x=data, labels=labels)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig(filename)
plt.close()
# end _save_boxplot
# Generate result matrix
def _generate_matrix(self):
"""
Generate matrix
:return:
"""
# Dim counters
dims = list()
# For each param
for param in self._params_dict.keys():
dims.append(len(self._params_dict[param]))
# end for
# Add samples
dims.append(self._n_samples)
# Add cross validation
dims.append(self._k)
if not self._nan:
return np.zeros(dims)
else:
m = np.zeros(dims)
m[:] = np.nan
return m
# end if
# end _generate_matrix
# Save histogram
def _save_histogram(self, filename, data, title, xlabel=u"", ylabel=u""):
"""
Save histogram
:param data:
:return:
"""
# Save figure
plt.hist(data[np.isfinite(data)], normed=True)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig(filename)
plt.close()
# Save latex data
self._save_histogram_data(filename, data)
# end _save_histogram
# Save histogram as data
def _save_histogram_data(self, filename, data):
"""
Save histogram as data for latex
:param filename:
:param data:
:return:
"""
with codecs.open(filename + u".csv", 'w', encoding='utf-8') as f:
f.write(u"success_rate\n")
for d in data[np.isfinite(data)]:
f.write(u"{}\n".format(d))
# end for
# end with
# end _save_histogram_data
# Create CSV file
def _create_csv_results(self, filename):
"""
Create CSV file
:param filename:
:return:
"""
# Writer
c = csv.writer(open(filename, 'wb'))
# Row list
row_list = list()
# First to last param
for param_index in range(len(self._params_dict.keys())):
# For each param
for param in self._params_dict.keys():
if param_index == self._param2dim[param]:
row_list.append(param)
# end if
# end for
# end for
# Append samples and fold
row_list.append(u"samples")
row_list.append(u"fold")
row_list.append(u"result")
# Write header
c.writerow(row_list)
return c
# end _create_csv_results
# end
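# Hedged usage sketch (editor's addition, not part of nsNLP): how ResultManager appears
# to be driven, inferred from the constructor and the set_*_state/add_result methods
# above. The parameter grid and success rates are made up for illustration.
# >>> results = ResultManager(output_dir=u"outputs", name=u"xp1", description=u"demo",
# ...                         params_dict={u"leak_rate": [0.1, 0.5], u"reservoir_size": [100, 200]},
# ...                         n_samples=2, k=10, verbose=2)
# >>> results.set_state({u"leak_rate": 0.1, u"reservoir_size": 100})
# >>> results.set_sample_state(0)
# >>> results.set_fold_state(0)
# >>> results.add_result(0.87)
# >>> results.save()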
| gpl-3.0 |
kshedstrom/pyroms | pyroms_toolbox/pyroms_toolbox/quiver.py | 2 | 5082 | from numpy import *
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import pyroms
def quiver(uvar, vvar, tindex, depth, gridid, \
filename=None, proj=None, d=2, uscale=None, max_length=None, \
xkey=0.9, ykey=0.1, ukey=1, outfile=None):
"""
quiver(uvar, vvar, tindex, depth, gridid)
optional switch:
- filename if defined, load the variable from file
- proj Basemap object returned by sview, zview, ...
- d arrow density parameter
- uscale data units per arrow length unit parameter
- max_length if defined, set maximum arrow length displayed
- xkey x location of the key
- ykey y location of the key
- ukey length of the key
- outfile if defined, write figure to file
overlay a 2-D field of arrows for velocity (uvar, vvar) above an
existing horizontal 2D plot. If filename is provided, uvar and vvar
must be strings and the variables will be loaded from the file.
grid can be a grid object or a gridid. In the latter case, the grid
object corresponding to the provided gridid will be loaded.
For projection, use proj=map, map being the Basemap object returned
by sview, zview, ...
Note: if quiver is called before any other part of the plot has been
created, you must create an axis which covers the region to be plotted.
to do this, you can call axis([Longitude_min, Longitude_max, Latitude_min, Latitude_max]),
where Longitude_min, etc, are replaced with the appropriate longitudes and latitudes.
"""
# get grid
if type(gridid).__name__ == 'ROMS_Grid':
grd = gridid
else:
grd = pyroms.grid.get_ROMS_grid(gridid)
lon = grd.hgrid.lon_rho
lat = grd.hgrid.lat_rho
mask = grd.hgrid.mask_rho
# get u and v
if filename == None:
if tindex != -1:
assert len(uvar.shape) == 4, 'uvar must be 4D (time plus space).'
assert len(vvar.shape) == 4, 'vvar must be 4D (time plus space).'
else:
assert len(uvar.shape) == 3, 'uvar must be 3D (no time dependency).'
assert len(vvar.shape) == 3, 'vvar must be 3D (no time dependency).'
if tindex == -1:
u = uvar[:,:,:]
v = vvar[:,:,:]
else:
u = uvar[tindex,:,:,:]
v = vvar[tindex,:,:,:]
else:
data = pyroms.io.Dataset(filename)
u = data.variables[uvar][tindex,:,:,:]
v = data.variables[vvar][tindex,:,:,:]
# get u and v slice at requested depth
zsliceu, lonu, latu = pyroms.tools.zslice(u, depth, grd, Cpos='u')
zslicev, lonv, latv = pyroms.tools.zslice(v, depth, grd, Cpos='v')
# average field at rho point position
zsliceu = 0.5 * (zsliceu[:,:-1] + zsliceu[:,1:])
zsliceu = zsliceu[:,r_[0,:size(zsliceu,1),-1]]
zsliceu = ma.masked_where(mask == 0, zsliceu)
zsliceu = ma.masked_where(zsliceu >= 1000, zsliceu)
zslicev = 0.5 * (zslicev[:-1,:] + zslicev[1:,:])
zslicev = zslicev[r_[0,:size(zslicev,0),-1],:]
zslicev = ma.masked_where(mask == 0, zslicev)
zslicev = ma.masked_where(zslicev >= 1000, zslicev)
U = zsliceu + 1j * zslicev
# rotate velocity vector according to grid angle
U = U * exp(1j * grd.hgrid.angle_rho)
#limit arrow length to max length if requested
if max_length is not None:
idx = where(abs(U) > max_length)
U[idx] = U[idx] * max_length / abs(U[idx])
# plot
if proj is not None:
x, y = proj(lon,lat)
else:
range = plt.axis()
if uscale is None:
if proj is not None:
qv = Basemap.quiver(proj, x[::d,::d], y[::d,::d], \
real(U[::d,::d]), imag(U[::d,::d]), \
linewidths=0.01)
else:
qv = plt.quiver(lon[::d,::d], lat[::d,::d], \
real(U[::d,::d]), imag(U[::d,::d]), \
linewidths=0.01)
else:
if proj is not None:
qv = Basemap.quiver(proj, x[::d,::d], y[::d,::d], \
real(U[::d,::d]), imag(U[::d,::d]), \
scale=uscale, linewidths=0.01)
else:
qv = plt.quiver(lon[::d,::d], lat[::d,::d], \
real(U[::d,::d]), imag(U[::d,::d]), \
scale=uscale, linewidths=0.01)
if proj is None:
plt.axis(range)
plt.quiverkey(qv, xkey, ykey, ukey, str(ukey) + ' ms$^{-1}$')
if outfile is not None:
if outfile.find('.png') != -1 or outfile.find('.svg') != -1 or \
outfile.find('.eps') != -1:
print 'Write figure to file', outfile
plt.savefig(outfile, dpi=100, facecolor='w', edgecolor='w', \
orientation='portrait')
else:
print 'Unrecognized file extension. Please use .png, .svg or .eps file extension.'
return
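# Hedged usage sketch (editor's addition): a typical call, assuming a horizontal slice
# has already been plotted (e.g. with zview) so the axes cover the grid; the file name,
# variable names, gridid and depth value are illustrative assumptions, and the depth
# convention follows pyroms.tools.zslice.
# >>> quiver('u', 'v', tindex=0, depth=-50, gridid='NWGOA3',
# ...        filename='ocean_his_0001.nc', d=4, uscale=10, outfile='uv_50m.png')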
| bsd-3-clause |
zooniverse/aggregation | experimental/serengeti/IAAI/confusionMatrixConsistency2.py | 2 | 3041 | #!/usr/bin/env python
__author__ = 'greg'
import csv
import os
import matplotlib.pyplot as plt
import numpy as np
if os.path.exists("/home/ggdhines/github/pyIBCC/python"):
baseDir = "/home/ggdhines/"
else:
baseDir = "/home/greg/"
species = "wildebeest"
count = {}
with open(baseDir + "Downloads/expert_classifications_raw.csv","rU") as csvfile:
goldreader = csv.reader(csvfile)
next(goldreader, None)
for line in goldreader:
photoID = line[2]
if not(photoID in count):
count[photoID] = {}
species = line[12]
try:
numAnimals = int(line[13])
except ValueError:
continue
count[photoID][species] = numAnimals
wildebeestCount = {}
for photoID,classification in count.items():
numAnimals = sum(classification.values())
if classification.keys() == ["wildebeest"]:
wildebeestCount[photoID] = numAnimals
correct = [0 for i in range(11)]
total = [0. for i in range(11)]
total = {}
correct = {}
values = []
userName = "Sobottka"
with open(baseDir +"Databases/goldMergedSerengeti.csv","rb") as csvfile:
zooreader = csv.reader(csvfile,delimiter="\t")
for l in zooreader:
photoID,userID = l[0].split(",")
if photoID in wildebeestCount:
if not(userID in total):
total[userID] = [0. for i in range(11)]
correct[userID] = [0. for i in range(11)]
classification = l[1].split(",")
speciesList = [s.split(":")[0] for s in classification]
if species in speciesList:
correct[userID][wildebeestCount[photoID]] += 1
if (userID == userName) and (wildebeestCount[photoID] == 10):
if species in speciesList:
print "++ " + str(photoID)
else:
print "-- " + str(photoID)
total[userID][wildebeestCount[photoID]] += 1
userPercentage = [[] for i in range(0,11)]
for userID in total:
if total[userID][1] == correct[userID][1]:
continue
if min(total[userID][1:]) == 0:
continue
if userID != userName:
continue
values.append((userID,correct[userID][1]/total[userID][1]))
for n in range(1,11):
if total[userID][n] == 0:
continue
userPercentage[n].append(correct[userID][n]/total[userID][n])
print values
print userPercentage
#for userID in userPercentage:
# print userID,userPercentage[userID][1]
percentage = [np.median(p)*100 for p in userPercentage[1:]]
std = [np.std(p) for p in userPercentage[1:]]
print 1-percentage[0]
print userPercentage
print correct[userName]
print total[userName]
#percentage = [c/t for (c,t) in zip(correct[1:],total[1:])]
plt.plot(range(1,11),percentage,'-o',color='black')
plt.plot(range(1,11), [(1-(1-percentage[0]/100.)**n)*100 for n in range(1,11)],'--',color='black')
plt.xlabel("Number of Wildebeest in Picture")
plt.ylabel("Percentage of Correct Classifications")
plt.legend(('Actual','Expected'))
plt.show()
| apache-2.0 |
bikong2/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different amount of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features / 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features / 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
mfjb/scikit-learn | examples/classification/plot_lda.py | 164 | 2224 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
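# Added illustrative sanity check (not part of the original example): the
# helper above returns an (n_samples, n_features) array plus n_samples labels,
# with only the first column carrying class information. The "_demo" names are
# introduced here purely for illustration.
_X_demo, _y_demo = generate_data(n_samples=6, n_features=3)
assert _X_demo.shape == (6, 3) and _y_demo.shape == (6,)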
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
| bsd-3-clause |
zzcclp/spark | python/pyspark/pandas/tests/data_type_ops/test_base.py | 13 | 3913 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pandas.api.types import CategoricalDtype
from pandas.api.extensions import ExtensionDtype
from pyspark.pandas.data_type_ops.base import DataTypeOps
from pyspark.pandas.data_type_ops.binary_ops import BinaryOps
from pyspark.pandas.data_type_ops.boolean_ops import BooleanOps, BooleanExtensionOps
from pyspark.pandas.data_type_ops.categorical_ops import CategoricalOps
from pyspark.pandas.data_type_ops.complex_ops import ArrayOps, MapOps, StructOps
from pyspark.pandas.data_type_ops.date_ops import DateOps
from pyspark.pandas.data_type_ops.datetime_ops import DatetimeOps
from pyspark.pandas.data_type_ops.null_ops import NullOps
from pyspark.pandas.data_type_ops.num_ops import IntegralOps, FractionalOps, DecimalOps
from pyspark.pandas.data_type_ops.string_ops import StringOps
from pyspark.pandas.data_type_ops.udt_ops import UDTOps
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
DataType,
DateType,
DecimalType,
FractionalType,
IntegralType,
MapType,
NullType,
StringType,
StructType,
TimestampType,
UserDefinedType,
)
class BaseTest(unittest.TestCase):
def test_data_type_ops(self):
_mock_spark_type = DataType()
_mock_dtype = ExtensionDtype()
_mappings = (
(CategoricalDtype(), _mock_spark_type, CategoricalOps),
(_mock_dtype, DecimalType(), DecimalOps),
(_mock_dtype, FractionalType(), FractionalOps),
(_mock_dtype, IntegralType(), IntegralOps),
(_mock_dtype, StringType(), StringOps),
(_mock_dtype, BooleanType(), BooleanOps),
(_mock_dtype, TimestampType(), DatetimeOps),
(_mock_dtype, DateType(), DateOps),
(_mock_dtype, BinaryType(), BinaryOps),
(_mock_dtype, ArrayType(StringType()), ArrayOps),
(_mock_dtype, MapType(StringType(), IntegralType()), MapOps),
(_mock_dtype, StructType(), StructOps),
(_mock_dtype, NullType(), NullOps),
(_mock_dtype, UserDefinedType(), UDTOps),
)
for _dtype, _spark_type, _ops in _mappings:
self.assertIsInstance(DataTypeOps(_dtype, _spark_type), _ops)
_unknow_spark_type = _mock_spark_type
self.assertRaises(TypeError, DataTypeOps, BooleanType(), _unknow_spark_type)
def test_bool_ext_ops(self):
from pyspark.pandas.typedef.typehints import extension_object_dtypes_available
if extension_object_dtypes_available:
from pandas import BooleanDtype
self.assertIsInstance(DataTypeOps(BooleanDtype(), BooleanType()), BooleanExtensionOps)
else:
self.assertIsInstance(DataTypeOps(ExtensionDtype(), BooleanType()), BooleanOps)
if __name__ == "__main__":
from pyspark.pandas.tests.data_type_ops.test_base import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
SpaceKatt/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/backends/backend_ps.py | 2 | 57561 | """
A PostScript backend, which can produce both PostScript .ps and .eps
"""
from __future__ import division
import glob, math, os, shutil, sys, time
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from tempfile import mkstemp
from cStringIO import StringIO
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.afm import AFM
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, get_realpath_and_stat, \
is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
from matplotlib.backends.backend_mixed import MixedModeRenderer
import numpy as np
import binascii
import re
try:
set
except NameError:
from sets import Set as set
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
backend_version = 'Level II'
debugPS = 0
class PsBackendHelper(object):
def __init__(self):
self._cached = {}
@property
def gs_exe(self):
"""
        executable name of ghostscript.
"""
try:
return self._cached["gs_exe"]
except KeyError:
pass
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
self._cached["gs_exe"] = gs_exe
return gs_exe
@property
def gs_version(self):
"""
version of ghostscript.
"""
try:
return self._cached["gs_version"]
except KeyError:
pass
from subprocess import Popen, PIPE
pipe = Popen(self.gs_exe + " --version",
shell=True, stdout=PIPE).stdout
gs_version = tuple(map(int, pipe.read().strip().split(".")))
self._cached["gs_version"] = gs_version
return gs_version
@property
def supports_ps2write(self):
"""
True if the installed ghostscript supports ps2write device.
"""
return self.gs_version[0] >= 9
ps_backend_helper = PsBackendHelper()
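# Added note (illustrative, not part of the original module): the helper above
# parses ``gs --version`` into a tuple such as (9, 27), so ``supports_ps2write``
# reduces to a major-version check; the executable name and the version tuple
# are cached in ``_cached`` after the first lookup.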
papersize = {'letter': (8.5,11),
'legal': (8.5,14),
'ledger': (11,17),
'a0': (33.11,46.81),
'a1': (23.39,33.11),
'a2': (16.54,23.39),
'a3': (11.69,16.54),
'a4': (8.27,11.69),
'a5': (5.83,8.27),
'a6': (4.13,5.83),
'a7': (2.91,4.13),
'a8': (2.07,2.91),
'a9': (1.457,2.05),
'a10': (1.02,1.457),
'b0': (40.55,57.32),
'b1': (28.66,40.55),
'b2': (20.27,28.66),
'b3': (14.33,20.27),
'b4': (10.11,14.33),
'b5': (7.16,10.11),
'b6': (5.04,7.16),
'b7': (3.58,5.04),
'b8': (2.51,3.58),
'b9': (1.76,2.51),
'b10': (1.26,1.76)}
def _get_papertype(w, h):
keys = papersize.keys()
keys.sort()
keys.reverse()
for key in keys:
if key.startswith('l'): continue
pw, ph = papersize[key]
if (w < pw) and (h < ph): return key
else:
return 'a0'
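# Added note (illustrative, not part of the original module): the helper above
# walks the paper sizes in reverse-sorted key order, skipping the
# 'letter'/'legal'/'ledger' entries, and returns the first size strictly larger
# than the figure in both dimensions; for example _get_papertype(2, 2) yields
# 'b8', while a figure too large for every listed size falls back to 'a0'.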
def _num_to_str(val):
if is_string_like(val): return val
ival = int(val)
if val==ival: return str(ival)
s = "%1.3f"%val
s = s.rstrip("0")
s = s.rstrip(".")
return s
def _nums_to_str(*args):
return ' '.join(map(_num_to_str,args))
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s=s.replace("\\", "\\\\")
s=s.replace("(", "\\(")
s=s.replace(")", "\\)")
s=s.replace("'", "\\251")
s=s.replace("`", "\\301")
s=re.sub(r"[^ -~\n]", lambda x: r"\%03o"%ord(x.group()), s)
return s
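# Added example (illustrative): quote_ps_string("50% (half)") returns the text
# 50% \(half\) -- parentheses and backslashes are escaped, and quotes and
# non-ASCII bytes are rendered as octal escapes, so the result can be embedded
# in a PostScript string literal delimited by parentheses.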
def seq_allequal(seq1, seq2):
"""
seq1 and seq2 are either None or sequences or arrays
Return True if both are None or both are seqs with identical
elements
"""
if seq1 is None:
return seq2 is None
if seq2 is None:
return False
#ok, neither are None:, assuming iterable
if len(seq1) != len(seq2): return False
return np.alltrue(np.equal(seq1, seq2))
class RendererPS(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles.
"""
fontd = maxdict(50)
afmfontd = maxdict(50)
def __init__(self, width, height, pswriter, imagedpi=72):
"""
Although postscript itself is dpi independent, we need to
        inform the image code about a requested dpi to generate high-res
        images and then scale them before embedding them
"""
RendererBase.__init__(self)
self.width = width
self.height = height
self._pswriter = pswriter
if rcParams['text.usetex']:
self.textcnt = 0
self.psfrag = []
self.imagedpi = imagedpi
# current renderer state (None=uninitialised)
self.color = None
self.linewidth = None
self.linejoin = None
self.linecap = None
self.linedash = None
self.fontname = None
self.fontsize = None
self._hatches = {}
self.image_magnification = imagedpi/72.0
self._clip_paths = {}
self._path_collection_id = 0
self.used_characters = {}
self.mathtext_parser = MathTextParser("PS")
self._afm_font_dir = os.path.join(
rcParams['datapath'], 'fonts', 'afm')
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
realpath, stat_key = get_realpath_and_stat(font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in other.items():
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def set_color(self, r, g, b, store=1):
if (r,g,b) != self.color:
if r==g and r==b:
self._pswriter.write("%1.3f setgray\n"%r)
else:
self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
if store: self.color = (r,g,b)
def set_linewidth(self, linewidth, store=1):
if linewidth != self.linewidth:
self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
if store: self.linewidth = linewidth
def set_linejoin(self, linejoin, store=1):
if linejoin != self.linejoin:
self._pswriter.write("%d setlinejoin\n"%linejoin)
if store: self.linejoin = linejoin
def set_linecap(self, linecap, store=1):
if linecap != self.linecap:
self._pswriter.write("%d setlinecap\n"%linecap)
if store: self.linecap = linecap
def set_linedash(self, offset, seq, store=1):
if self.linedash is not None:
oldo, oldseq = self.linedash
if seq_allequal(seq, oldseq): return
if seq is not None and len(seq):
s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
self._pswriter.write(s)
else:
self._pswriter.write("[] 0 setdash\n")
if store: self.linedash = (offset,seq)
def set_font(self, fontname, fontsize, store=1):
if rcParams['ps.useafm']: return
if (fontname,fontsize) != (self.fontname,self.fontsize):
out = ("/%s findfont\n"
"%1.3f scalefont\n"
"setfont\n" % (fontname,fontsize))
self._pswriter.write(out)
if store: self.fontname = fontname
if store: self.fontsize = fontsize
def create_hatch(self, hatch):
sidelen = 72
if self._hatches.has_key(hatch):
return self._hatches[hatch]
name = 'H%d' % len(self._hatches)
self._pswriter.write("""\
<< /PatternType 1
/PaintType 2
/TilingType 2
/BBox[0 0 %(sidelen)d %(sidelen)d]
/XStep %(sidelen)d
/YStep %(sidelen)d
/PaintProc {
pop
0 setlinewidth
""" % locals())
self._pswriter.write(
self._convert_path(Path.hatch(hatch), Affine2D().scale(72.0),
simplify=False))
self._pswriter.write("""\
stroke
} bind
>>
matrix
makepattern
/%(name)s exch def
""" % locals())
self._hatches[hatch] = name
return name
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop
"""
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width, height, descent
if rcParams['ps.useafm']:
if ismath: s = s[1:-1]
font = self._get_font_afm(prop)
l,b,w,h,d = font.get_str_bbox_and_descent(s)
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
w *= scale
h *= scale
d *= scale
return w, h, d
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
#print s, w, h
return w, h, d
def flipy(self):
'return true if small y numbers are top for renderer'
return False
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afmfontd.get(key)
if font is None:
fname = findfont(prop, fontext='afm', directory=self._afm_font_dir)
if fname is None:
fname = findfont(
"Helvetica", fontext='afm', directory=self._afm_font_dir)
font = self.afmfontd.get(fname)
if font is None:
font = AFM(file(fname))
self.afmfontd[fname] = font
self.afmfontd[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _rgba(self, im):
return im.as_rgba_str()
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = np.fromstring(s, np.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:,:,:3]
return h, w, rgb.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = np.fromstring(rgbat[2], np.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(np.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(np.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def _hex_lines(self, s, chars_per_line=128):
s = binascii.b2a_hex(s)
nhex = len(s)
lines = []
for i in range(0,nhex,chars_per_line):
limit = min(i+chars_per_line, nhex)
lines.append(s[i:limit])
return lines
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to draw_image.
Allows a backend to have images at a different resolution to other
artists.
"""
return self.image_magnification
def option_scale_image(self):
"""
        The ps backend supports arbitrary scaling of images.
"""
return True
def _get_image_h_w_bits_command(self, im):
if im.is_grayscale:
h, w, bits = self._gray(im)
imagecmd = "image"
else:
h, w, bits = self._rgb(im)
imagecmd = "false 3 colorimage"
return h, w, bits, imagecmd
def draw_image(self, gc, x, y, im, dx=None, dy=None, transform=None):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas and y
is the distance from bottom
        dx, dy are the width and height of the image. If a transform
(which must be an affine transform) is given, x, y, dx, dy are
interpreted as the coordinate of the transform.
"""
im.flipud_out()
h, w, bits, imagecmd = self._get_image_h_w_bits_command(im)
hexlines = '\n'.join(self._hex_lines(bits))
if dx is None:
xscale = w / self.image_magnification
else:
xscale = dx
if dy is None:
yscale = h/self.image_magnification
else:
yscale = dy
if transform is None:
matrix = "1 0 0 1 0 0"
else:
matrix = " ".join(map(str, transform.to_values()))
figh = self.height*72
#print 'values', origin, flipud, figh, h, y
bbox = gc.get_clip_rectangle()
clippath, clippath_trans = gc.get_clip_path()
clip = []
if bbox is not None:
clipx,clipy,clipw,cliph = bbox.bounds
clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
if clippath is not None:
id = self._get_clip_path(clippath, clippath_trans)
clip.append('%s' % id)
clip = '\n'.join(clip)
#y = figh-(y+h)
ps = """gsave
%(clip)s
[%(matrix)s] concat
%(x)s %(y)s translate
%(xscale)s %(yscale)s scale
/DataString %(w)s string def
%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
{
currentfile DataString readhexstring pop
} bind %(imagecmd)s
%(hexlines)s
grestore
""" % locals()
self._pswriter.write(ps)
# unflip
im.flipud_out()
def _convert_path(self, path, transform, clip=False, simplify=None):
ps = []
last_points = None
if clip:
clip = (0.0, 0.0, self.width * 72.0,
self.height * 72.0)
else:
clip = None
for points, code in path.iter_segments(transform, clip=clip,
simplify=simplify):
if code == Path.MOVETO:
ps.append("%g %g m" % tuple(points))
elif code == Path.CLOSEPOLY:
ps.append("cl")
elif last_points is None:
# The other operations require a previous point
raise ValueError('Path lacks initial MOVETO')
elif code == Path.LINETO:
ps.append("%g %g l" % tuple(points))
elif code == Path.CURVE3:
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
ps.append("%g %g %g %g %g %g c" %
tuple(points[2:]))
elif code == Path.CURVE4:
ps.append("%g %g %g %g %g %g c" % tuple(points))
last_points = points
ps = "\n".join(ps)
return ps
def _get_clip_path(self, clippath, clippath_transform):
id = self._clip_paths.get((clippath, clippath_transform))
if id is None:
id = 'c%x' % len(self._clip_paths)
ps_cmd = ['/%s {' % id]
ps_cmd.append(self._convert_path(clippath, clippath_transform,
simplify=False))
ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
self._pswriter.write('\n'.join(ps_cmd))
self._clip_paths[(clippath, clippath_transform)] = id
return id
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a Path instance using the given affine transform.
"""
clip = (rgbFace is None and gc.get_hatch_path() is None)
simplify = path.should_simplify and clip
ps = self._convert_path(
path, transform, clip=clip, simplify=simplify)
self._draw_ps(ps, gc, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draw the markers defined by path at each of the positions in x
and y. path coordinates are points, x and y coords will be
transformed by the transform
"""
if debugPS: self._pswriter.write('% draw_markers \n')
write = self._pswriter.write
if rgbFace:
if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]:
ps_color = '%1.3f setgray' % rgbFace[0]
else:
ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace
# construct the generic marker command:
        ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # don't want the translate to be global
lw = gc.get_linewidth()
stroke = lw != 0.0
if stroke:
ps_cmd.append('%.1f setlinewidth' % lw)
jint = gc.get_joinstyle()
ps_cmd.append('%d setlinejoin' % jint)
cint = gc.get_capstyle()
ps_cmd.append('%d setlinecap' % cint)
ps_cmd.append(self._convert_path(marker_path, marker_trans,
simplify=False))
if rgbFace:
if stroke:
ps_cmd.append('gsave')
ps_cmd.extend([ps_color, 'fill'])
if stroke:
ps_cmd.append('grestore')
if stroke:
ps_cmd.append('stroke')
ps_cmd.extend(['grestore', '} bind def'])
for vertices, code in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
ps_cmd.append("%g %g o" % (x, y))
ps = '\n'.join(ps_cmd)
self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
write = self._pswriter.write
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = 'p%x_%x' % (self._path_collection_id, i)
ps_cmd = ['/%s {' % name,
'newpath', 'translate']
ps_cmd.append(self._convert_path(path, transform, simplify=False))
ps_cmd.extend(['} bind def\n'])
write('\n'.join(ps_cmd))
path_codes.append(name)
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, path_codes, offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
ps = "%g %g %s" % (xo, yo, path_id)
self._draw_ps(ps, gc0, rgbFace)
self._path_collection_id += 1
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
"""
draw a Text instance
"""
w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
fontsize = prop.get_size_in_points()
thetext = 'psmarker%d' % self.textcnt
color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(
rcParams['font.family'], r'{\rmfamily %s}')
s = fontcmd % s
tex = r'\color[rgb]{%s} %s' % (color, s)
corr = 0#w/2*(fontsize-10)/10
if rcParams['text.latex.preview']:
# use baseline alignment!
pos = _nums_to_str(x-corr, y+bl)
self.psfrag.append(r'\psfrag{%s}[Bl][Bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
else:
# stick to the bottom alignment, but this may give incorrect baseline some times.
pos = _nums_to_str(x-corr, y)
self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
ps = """\
gsave
%(pos)s moveto
(%(thetext)s)
show
grestore
""" % locals()
self._pswriter.write(ps)
self.textcnt += 1
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
draw a Text instance
"""
# local to avoid repeated attribute lookups
write = self._pswriter.write
if debugPS:
write("% text\n")
if ismath=='TeX':
            return self.draw_tex(gc, x, y, s, prop, angle)
elif ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
elif rcParams['ps.useafm']:
self.set_color(*gc.get_rgb())
font = self._get_font_afm(prop)
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
thisx = 0
thisy = font.get_str_bbox_and_descent(s)[4] * scale
last_name = None
lines = []
for c in s:
name = uni2type1.get(ord(c), 'question')
try:
width = font.get_width_from_char_name(name)
except KeyError:
name = 'question'
width = font.get_width_char('?')
if last_name is not None:
kern = font.get_kern_dist_from_name(last_name, name)
else:
kern = 0
last_name = name
thisx += kern * scale
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += width * scale
thetext = "\n".join(lines)
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
cmap = font.get_charmap()
lastgind = None
#print 'text', s
lines = []
thisx = 0
thisy = font.get_descent() / 64.0
for c in s:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
name = '.notdef'
gind = 0
else:
name = font.get_glyph_name(gind)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
lastgind = gind
thisx += kern/64.0
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += glyph.linearHoriAdvance/65536.0
thetext = '\n'.join(lines)
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def new_gc(self):
return GraphicsContextPS()
def draw_mathtext(self, gc,
x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if debugPS:
self._pswriter.write("% mathtext\n")
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
self.set_color(*gc.get_rgb())
thetext = pswriter.getvalue()
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def draw_gouraud_triangle(self, gc, points, colors, trans):
self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
colors.reshape((1, 3, 4)), trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] == 4
points = trans.transform(points)
shape = points.shape
flat_points = points.reshape((shape[0] * shape[1], 2))
flat_colors = colors.reshape((shape[0] * shape[1], 4))
points_min = np.min(flat_points, axis=0) - (1 << 8)
points_max = np.max(flat_points, axis=0) + (1 << 8)
factor = float(0xffffffff) / (points_max - points_min)
xmin, ymin = points_min
xmax, ymax = points_max
streamarr = np.empty(
(shape[0] * shape[1],),
dtype=[('flags', 'u1'),
('points', '>u4', (2,)),
('colors', 'u1', (3,))])
streamarr['flags'] = 0
streamarr['points'] = (flat_points - points_min) * factor
streamarr['colors'] = flat_colors[:, :3] * 255.0
stream = quote_ps_string(streamarr.tostring())
self._pswriter.write("""
gsave
<< /ShadingType 4
/ColorSpace [/DeviceRGB]
/BitsPerCoordinate 32
/BitsPerComponent 8
/BitsPerFlag 8
/AntiAlias true
/Decode [ %(xmin)f %(xmax)f %(ymin)f %(ymax)f 0 1 0 1 0 1 ]
/DataSource (%(stream)s)
>>
shfill
grestore
""" % locals())
def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
"""
        Emit the PostScript snippet 'ps' with all the attributes from 'gc'
applied. 'ps' must consist of PostScript commands to construct a path.
The fill and/or stroke kwargs can be set to False if the
'ps' string already includes filling and/or stroking, in
which case _draw_ps is just supplying properties and
clipping.
"""
# local variable eliminates all repeated attribute lookups
write = self._pswriter.write
if debugPS and command:
write("% "+command+"\n")
mightstroke = (gc.get_linewidth() > 0.0 and
(len(gc.get_rgb()) <= 3 or gc.get_rgb()[3] != 0.0))
stroke = stroke and mightstroke
fill = (fill and rgbFace is not None and
(len(rgbFace) <= 3 or rgbFace[3] != 0.0))
if mightstroke:
self.set_linewidth(gc.get_linewidth())
jint = gc.get_joinstyle()
self.set_linejoin(jint)
cint = gc.get_capstyle()
self.set_linecap(cint)
self.set_linedash(*gc.get_dashes())
self.set_color(*gc.get_rgb()[:3])
write('gsave\n')
cliprect = gc.get_clip_rectangle()
if cliprect:
x,y,w,h=cliprect.bounds
write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
clippath, clippath_trans = gc.get_clip_path()
if clippath:
id = self._get_clip_path(clippath, clippath_trans)
write('%s\n' % id)
# Jochen, is the strip necessary? - this could be a honking big string
write(ps.strip())
write("\n")
if fill:
if stroke:
write("gsave\n")
self.set_color(store=0, *rgbFace[:3])
write("fill\n")
if stroke:
write("grestore\n")
hatch = gc.get_hatch()
if hatch:
hatch_name = self.create_hatch(hatch)
write("gsave\n")
write("[/Pattern [/DeviceRGB]] setcolorspace %f %f %f " % gc.get_rgb()[:3])
write("%s setcolor fill grestore\n" % hatch_name)
if stroke:
write("stroke\n")
write("grestore\n")
class GraphicsContextPS(GraphicsContextBase):
def get_capstyle(self):
return {'butt':0,
'round':1,
'projecting':2}[GraphicsContextBase.get_capstyle(self)]
def get_joinstyle(self):
return {'miter':0,
'round':1,
'bevel':2}[GraphicsContextBase.get_joinstyle(self)]
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasPS(thisFig)
manager = FigureManagerPS(canvas, num)
return manager
class FigureCanvasPS(FigureCanvasBase):
_renderer_class = RendererPS
def draw(self):
pass
filetypes = {'ps' : 'Postscript',
'eps' : 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def print_ps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'ps', *args, **kwargs)
def print_eps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(self, outfile, format, *args, **kwargs):
papertype = kwargs.pop("papertype", rcParams['ps.papersize'])
papertype = papertype.lower()
if papertype == 'auto':
pass
elif papertype not in papersize:
raise RuntimeError( '%s is not a valid papertype. Use one \
of %s'% (papertype, ', '.join( papersize.keys() )) )
orientation = kwargs.pop("orientation", "portrait").lower()
if orientation == 'landscape': isLandscape = True
elif orientation == 'portrait': isLandscape = False
else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
self.figure.set_dpi(72) # Override the dpi kwarg
imagedpi = kwargs.pop("dpi", 72)
facecolor = kwargs.pop("facecolor", "w")
edgecolor = kwargs.pop("edgecolor", "w")
if rcParams['text.usetex']:
self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
else:
self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
orientation='portrait', isLandscape=False, papertype=None,
**kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and
edge colors. This is useful because some of the GUIs have a
gray figure face color background and you'll probably want to
override this on hardcopy
If outfile is a string, it is interpreted as a file name.
If the extension matches .ep* write encapsulated postscript,
otherwise write a stand-alone PostScript file.
If outfile is a file object, a stand-alone PostScript file is
written into this file object.
"""
isEPSF = format == 'eps'
passed_in_file_object = False
if is_string_like(outfile):
title = outfile
elif is_writable_file_like(outfile):
title = None
passed_in_file_object = True
else:
raise ValueError("outfile must be a path or a file-like object")
fd, tmpfile = mkstemp()
fh = os.fdopen(fd, 'w')
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if papertype == 'auto':
if isLandscape: papertype = _get_papertype(height, width)
else: papertype = _get_papertype(width, height)
if isLandscape: paperHeight, paperWidth = papersize[papertype]
else: paperWidth, paperHeight = papersize[papertype]
if rcParams['ps.usedistiller'] and not papertype == 'auto':
# distillers will improperly clip eps files if the pagesize is
# too small
if width>paperWidth or height>paperHeight:
if isLandscape:
papertype = _get_papertype(height, width)
paperHeight, paperWidth = papersize[papertype]
else:
papertype = _get_papertype(width, height)
paperWidth, paperHeight = papersize[papertype]
# center the figure on the paper
xo = 72*0.5*(paperWidth - width)
yo = 72*0.5*(paperHeight - height)
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
rotation = 0
if isLandscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72*paperHeight - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get("dryrun", False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
self._pswriter = StringIO()
# mixed mode rendering
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
ps_renderer = self._renderer_class(width, height, self._pswriter,
imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the PostScript headers
if isEPSF: print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
else: print >>fh, "%!PS-Adobe-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%Orientation: " + orientation
if not isEPSF: print >>fh, "%%DocumentPaperSizes: "+papertype
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
if not isEPSF: print >>fh, "%%Pages: 1"
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
if not rcParams['ps.useafm']:
Ndict += len(ps_renderer.used_characters)
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
if not rcParams['ps.useafm']:
for font_filename, chars in ps_renderer.used_characters.values():
if len(chars):
font = FT2Font(str(font_filename))
cmap = font.get_charmap()
glyph_ids = []
for c in chars:
gind = cmap.get(c) or 0
glyph_ids.append(gind)
fonttype = rcParams['ps.fonttype']
# Can not use more than 255 characters from a
# single font for Type 3
if len(glyph_ids) > 255:
fonttype = 42
# The ttf to ps (subsetting) support doesn't work for
# OpenType fonts that are Postscript inside (like the
# STIX fonts). This will simply turn that off to avoid
# errors.
if is_opentype_cff_font(font_filename):
raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.")
else:
convert_ttf_to_ps(font_filename, fh, fonttype, glyph_ids)
print >>fh, "end"
print >>fh, "%%EndProlog"
if not isEPSF: print >>fh, "%%Page: 1 1"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
if rotation: print >>fh, "%d rotate"%rotation
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
if not isEPSF: print >>fh, "%%EOF"
fh.close()
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if passed_in_file_object:
fh = open(tmpfile)
print >>outfile, fh.read()
else:
open(outfile, 'w')
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode)
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs):
"""
If text.usetex is True in rc, a temporary pair of tex/eps files
are created to allow tex to manage the text layout via the PSFrags
package. These files are processed to yield the final ps or eps file.
"""
isEPSF = format == 'eps'
title = outfile
# write to a temp file, we'll move it to outfile when done
fd, tmpfile = mkstemp()
fh = os.fdopen(fd, 'w')
self.figure.dpi = 72 # ignore the dpi kwarg
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get("dryrun", False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
self._pswriter = StringIO()
# mixed mode rendering
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
ps_renderer = self._renderer_class(width, height,
self._pswriter, imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the Encapsulated PostScript headers
print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
print >>fh, "end"
print >>fh, "%%EndProlog"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
fh.close()
if isLandscape: # now we are ready to rotate
isLandscape = True
width, height = height, width
bbox = (lly, llx, ury, urx)
# set the paper size to the figure size if isEPSF. The
# resulting ps file has the given size with correct bounding
# box so that there is no need to call 'pstoeps'
if isEPSF:
paperWidth, paperHeight = self.figure.get_size_inches()
if isLandscape:
paperWidth, paperHeight = paperHeight, paperWidth
else:
temp_papertype = _get_papertype(width, height)
if papertype=='auto':
papertype = temp_papertype
paperWidth, paperHeight = papersize[temp_papertype]
else:
paperWidth, paperHeight = papersize[papertype]
if (width>paperWidth or height>paperHeight) and isEPSF:
paperWidth, paperHeight = papersize[temp_papertype]
verbose.report('Your figure is too big to fit on %s paper. %s \
paper will be used to prevent clipping.'%(papertype, temp_papertype), 'helpful')
texmanager = ps_renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
psfrag_rotated = convert_psfrags(tmpfile, ps_renderer.psfrag,
font_preamble,
custom_preamble, paperWidth, paperHeight,
orientation)
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif rcParams['text.usetex']:
if False: pass # for debugging
else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
if isinstance(outfile, file):
fh = file(tmpfile)
print >>outfile, fh.read()
else:
open(outfile, 'w')
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode)
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
paperWidth, paperHeight, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
tmpdir = os.path.split(tmpfile)[0]
epsfile = tmpfile+'.eps'
shutil.move(tmpfile, epsfile)
latexfile = tmpfile+'.tex'
outfile = tmpfile+'.output'
latexh = file(latexfile, 'w')
dvifile = tmpfile+'.dvi'
psfile = tmpfile+'.ps'
if orientation=='landscape': angle = 90
else: angle = 0
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
\usepackage{psfrag}
\usepackage[dvips]{graphicx}
\usepackage{color}
\pagestyle{empty}
\begin{document}
\begin{figure}
\centering
\leavevmode
%s
\includegraphics*[angle=%s]{%s}
\end{figure}
\end{document}
"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
paperWidth, paperHeight,
'\n'.join(psfrags), angle, os.path.split(epsfile)[-1])
if rcParams['text.latex.unicode']:
latexh.write(s.encode('utf8'))
else:
try:
latexh.write(s)
except UnicodeEncodeError, err:
verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
latexh.close()
# the split drive part of the command is necessary for windows users with
    # multiple drives
if sys.platform == 'win32': precmd = '%s &&'% os.path.splitdrive(tmpdir)[0]
else: precmd = ''
command = '%s cd "%s" && latex -interaction=nonstopmode "%s" > "%s"'\
%(precmd, tmpdir, latexfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
raise RuntimeError('LaTeX was not able to process your file:\
\nHere is the full report generated by LaTeX: \n\n%s'% fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = '%s cd "%s" && dvips -q -R0 -o "%s" "%s" > "%s"'%(precmd, tmpdir,
os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(epsfile)
shutil.move(psfile, tmpfile)
# check if the dvips created a ps in landscape paper. Somehow,
# above latex+dvips results in a ps file in a landscape mode for a
# certain figure sizes (e.g., 8.3in,5.8in which is a5). And the
    # bounding box of the final output got messed up. We check to see if
# the generated ps file is in landscape and return this
# information. The return value is used in pstoeps step to recover
# the correct bounding box. 2010-06-05 JJL
if "Landscape" in open(tmpfile).read(1000):
psfrag_rotated = True
else:
psfrag_rotated = False
if not debugPS:
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
return psfrag_rotated
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
if eps: paper_option = "-dEPSCrop"
else: paper_option = "-sPAPERSIZE=%s" % ptype
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
dpi = rcParams['ps.distiller.res']
gs_exe = ps_backend_helper.gs_exe
if ps_backend_helper.supports_ps2write: # gs version >= 9
device_name = "ps2write"
else:
device_name = "pswrite"
command = '%s -dBATCH -dNOPAUSE -r%d -sDEVICE=%s %s -sOutputFile="%s" \
"%s" > "%s"'% (gs_exe, dpi, device_name,
paper_option, psfile, tmpfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ghostscript was not able to process \
your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
# While it is best if above steps preserve the original bounding
# box, there seem to be cases when it is not. For those cases,
# the original bbox can be restored during the pstoeps step.
if eps:
# For some versions of gs, above steps result in an ps file
# where the original bbox is no more correct. Do not adjust
# bbox for now.
if ps_backend_helper.supports_ps2write:
            # for gs version >= 9 w/ ps2write device
pstoeps(tmpfile, bbox, rotated=rotated)
else:
pstoeps(tmpfile)
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
pdffile = tmpfile + '.pdf'
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
if eps: paper_option = "-dEPSCrop"
else: paper_option = "-sPAPERSIZE=%s" % ptype
command = 'ps2pdf -dAutoFilterColorImages=false \
-sColorImageFilter=FlateEncode %s "%s" "%s" > "%s"'% \
(paper_option, tmpfile, pdffile, outfile)
if sys.platform == 'win32': command = command.replace('=', '#')
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ps2pdf was not able to process your \
image.\nHere is the report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = 'pdftops -paper match -level2 "%s" "%s" > "%s"'% \
(pdffile, psfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('pdftops was not able to process your \
image.\nHere is the full report generated by pdftops: \n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile)
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def get_bbox_header(lbrt, rotated=False):
"""
    Return a postscript header string for the given bbox lbrt=(l, b, r, t).
Optionally, return rotate command.
"""
l, b, r, t = lbrt
if rotated:
rotate = "%.2f %.2f translate\n90 rotate" % (l+r, 0)
else:
rotate = ""
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info]), rotate
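# Added example (illustrative): get_bbox_header((0, 0, 100.5, 200.25)) returns
# ('%%BoundingBox: 0 0 101 201\n'
#  '%%HiResBoundingBox: 0.000000 0.000000 100.500000 200.250000', '')
# i.e. the integer box is rounded up while the hi-res box keeps full precision;
# the second element is a rotate command only when rotated=True.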
# get_bbox is deprecated. I don't see any reason to use ghostscript to
# find the bounding box, as the required bounding box is already known.
def get_bbox(tmpfile, bbox):
"""
Use ghostscript's bbox device to find the center of the bounding box. Return
an appropriately sized bbox centered around that point. A bit of a hack.
"""
outfile = tmpfile + '.output'
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -sDEVICE=bbox "%s"' %\
(gs_exe, tmpfile)
verbose.report(command, 'debug')
stdin, stdout, stderr = os.popen3(command)
verbose.report(stdout.read(), 'debug-annoying')
bbox_info = stderr.read()
verbose.report(bbox_info, 'helpful')
bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
if bbox_found:
bbox_info = bbox_found.group()
else:
raise RuntimeError('Ghostscript was not able to extract a bounding box.\
Here is the Ghostscript output:\n\n%s'% bbox_info)
l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
# this is a hack to deal with the fact that ghostscript does not return the
# intended bbox, but a tight bbox. For now, we just center the ink in the
# intended bbox. This is not ideal, users may intend the ink to not be
# centered.
if bbox is None:
l, b, r, t = (l-1, b-1, r+1, t+1)
else:
x = (l+r)/2
y = (b+t)/2
dx = (bbox[2]-bbox[0])/2
dy = (bbox[3]-bbox[1])/2
l,b,r,t = (x-dx, y-dy, x+dx, y+dy)
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info])
def pstoeps(tmpfile, bbox=None, rotated=False):
"""
Convert the postscript to encapsulated postscript. The bbox of
the eps file will be replaced with the given *bbox* argument. If
None, original bbox will be used.
"""
# if rotated==True, the output eps file need to be rotated
if bbox:
bbox_info, rotate = get_bbox_header(bbox, rotated=rotated)
else:
bbox_info, rotate = None, None
epsfile = tmpfile + '.eps'
epsh = file(epsfile, 'w')
tmph = file(tmpfile)
line = tmph.readline()
# Modify the header:
while line:
if line.startswith('%!PS'):
print >>epsh, "%!PS-Adobe-3.0 EPSF-3.0"
if bbox:
print >>epsh, bbox_info
elif line.startswith('%%EndComments'):
epsh.write(line)
print >>epsh, '%%BeginProlog'
print >>epsh, 'save'
print >>epsh, 'countdictstack'
print >>epsh, 'mark'
print >>epsh, 'newpath'
print >>epsh, '/showpage {} def'
print >>epsh, '/setpagedevice {pop} def'
print >>epsh, '%%EndProlog'
print >>epsh, '%%Page 1 1'
if rotate:
print >>epsh, rotate
break
elif bbox and (line.startswith('%%Bound') \
or line.startswith('%%HiResBound') \
or line.startswith('%%DocumentMedia') \
or line.startswith('%%Pages')):
pass
else:
epsh.write(line)
line = tmph.readline()
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
line = tmph.readline()
while line:
if line.startswith('%%Trailer'):
print >>epsh, '%%Trailer'
print >>epsh, 'cleartomark'
print >>epsh, 'countdictstack'
print >>epsh, 'exch sub { end } repeat'
print >>epsh, 'restore'
if rcParams['ps.usedistiller'] == 'xpdf':
# remove extraneous "end" operator:
line = tmph.readline()
elif line.startswith('%%PageBoundingBox'):
pass
else:
epsh.write(line)
line = tmph.readline()
tmph.close()
epsh.close()
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
class FigureManagerPS(FigureManagerBase):
pass
FigureManager = FigureManagerPS
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
psDefs = [
# x y *m* -
"/m { moveto } bind def",
# x y *l* -
"/l { lineto } bind def",
# x y *r* -
"/r { rlineto } bind def",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } bind def",
# *closepath* -
"/cl { closepath } bind def",
# w h x y *box* -
"""/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
# w h x y *clipbox* -
"""/clipbox {
box
clip
newpath
} bind def""",
]
| gpl-3.0 |
kambysese/mne-python | mne/viz/topo.py | 3 | 39324 | """Functions to plot M/EEG data on topo (one axes per channel)."""
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
from copy import deepcopy
from functools import partial
from itertools import cycle
import numpy as np
from ..io.pick import channel_type, pick_types
from ..utils import _clean_names, warn, _check_option, Bunch
from ..channels.layout import _merge_ch_data, _pair_grad_sensors, find_layout
from ..defaults import _handle_default
from .utils import (_check_delayed_ssp, _get_color_list, _draw_proj_checkbox,
add_background_image, plt_show, _setup_vmin_vmax,
DraggableColorbar, _setup_ax_spines,
_check_cov, _plot_masked_image)
def iter_topography(info, layout=None, on_pick=None, fig=None,
fig_facecolor='k', axis_facecolor='k',
axis_spinecolor='k', layout_scale=None, legend=False):
"""Create iterator over channel positions.
This function returns a generator that unpacks into
a series of matplotlib axis objects and data / channel
indices, both corresponding to the sensor positions
of the related layout passed or inferred from the channel info.
Hence, this enables convenient topography plot customization.
Parameters
----------
info : instance of Info
The measurement info.
layout : instance of mne.channels.Layout | None
The layout to use. If None, layout will be guessed.
on_pick : callable | None
The callback function to be invoked on clicking one
of the axes. Is supposed to instantiate the following
API: ``function(axis, channel_index)``.
fig : matplotlib.figure.Figure | None
The figure object to be considered. If None, a new
figure will be created.
fig_facecolor : color
The figure face color. Defaults to black.
axis_facecolor : color
The axis face color. Defaults to black.
axis_spinecolor : color
The axis spine color. Defaults to black. In other words,
the color of the axis' edge lines.
layout_scale : float | None
Scaling factor for adjusting the relative size of the layout
on the canvas. If None, nothing will be scaled.
legend : bool
If True, an additional axis is created in the bottom right corner
that can be used to, e.g., construct a legend. The index of this
axis will be -1.
Returns
-------
gen : generator
A generator that can be unpacked into:
ax : matplotlib.axis.Axis
The current axis of the topo plot.
ch_dx : int
The related channel index.
"""
return _iter_topography(info, layout, on_pick, fig, fig_facecolor,
axis_facecolor, axis_spinecolor, layout_scale,
legend=legend)
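# A minimal usage sketch of the generator above (added illustration; ``evoked``
# is an assumed mne.Evoked instance, not defined in this module):
#
#     import matplotlib.pyplot as plt
#     for ax, ch_idx in iter_topography(evoked.info, fig_facecolor='w',
#                                       axis_facecolor='w',
#                                       axis_spinecolor='w'):
#         ax.plot(evoked.times, evoked.data[ch_idx], color='k')
#     plt.show()
#
# Each selected sensor gets its own small matplotlib axes placed according to
# the inferred layout.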
def _legend_axis(pos):
"""Add a legend axis to the bottom right."""
import matplotlib.pyplot as plt
left, bottom = pos[:, 0].max(), pos[:, 1].min()
wid, hei = pos[-1, 2:]
return plt.axes([left, bottom + .05, wid, hei])
def _iter_topography(info, layout, on_pick, fig, fig_facecolor='k',
axis_facecolor='k', axis_spinecolor='k',
layout_scale=None, unified=False, img=False, axes=None,
legend=False):
"""Iterate over topography.
Has the same parameters as iter_topography, plus:
unified : bool
If False (default), multiple matplotlib axes will be used.
If True, a single axis will be constructed. The former is
useful for custom plotting, the latter for speed.
"""
from matplotlib import pyplot as plt, collections
if fig is None:
fig = plt.figure()
def format_coord_unified(x, y, pos=None, ch_names=None):
"""Update status bar with channel name under cursor."""
# find candidate channels (ones that are down and left from cursor)
pdist = np.array([x, y]) - pos[:, :2]
pind = np.where((pdist >= 0).all(axis=1))[0]
if len(pind) > 0:
# find the closest channel
closest = pind[np.sum(pdist[pind, :]**2, axis=1).argmin()]
# check whether we are inside its box
in_box = (pdist[closest, :] < pos[closest, 2:]).all()
else:
in_box = False
return (('%s (click to magnify)' % ch_names[closest]) if
in_box else 'No channel here')
def format_coord_multiaxis(x, y, ch_name=None):
"""Update status bar with channel name under cursor."""
return '%s (click to magnify)' % ch_name
fig.set_facecolor(fig_facecolor)
if layout is None:
layout = find_layout(info)
if on_pick is not None:
callback = partial(_plot_topo_onpick, show_func=on_pick)
fig.canvas.mpl_connect('button_press_event', callback)
pos = layout.pos.copy()
if layout_scale:
pos[:, :2] *= layout_scale
ch_names = _clean_names(info['ch_names'])
iter_ch = [(x, y) for x, y in enumerate(layout.names) if y in ch_names]
if unified:
if axes is None:
under_ax = plt.axes([0, 0, 1, 1])
under_ax.axis('off')
else:
under_ax = axes
under_ax.format_coord = partial(format_coord_unified, pos=pos,
ch_names=layout.names)
under_ax.set(xlim=[0, 1], ylim=[0, 1])
axs = list()
for idx, name in iter_ch:
ch_idx = ch_names.index(name)
if not unified: # old, slow way
ax = plt.axes(pos[idx])
ax.patch.set_facecolor(axis_facecolor)
for spine in ax.spines.values():
spine.set_color(axis_spinecolor)
if not legend:
ax.set(xticklabels=[], yticklabels=[])
for tick in ax.get_xticklines() + ax.get_yticklines():
tick.set_visible(False)
ax._mne_ch_name = name
ax._mne_ch_idx = ch_idx
ax._mne_ax_face_color = axis_facecolor
ax.format_coord = partial(format_coord_multiaxis, ch_name=name)
yield ax, ch_idx
else:
ax = Bunch(ax=under_ax, pos=pos[idx], data_lines=list(),
_mne_ch_name=name, _mne_ch_idx=ch_idx,
_mne_ax_face_color=axis_facecolor)
axs.append(ax)
if not unified and legend:
ax = _legend_axis(pos)
yield ax, -1
if unified:
under_ax._mne_axs = axs
# Create a PolyCollection for the axis backgrounds
verts = np.transpose([pos[:, :2],
pos[:, :2] + pos[:, 2:] * [1, 0],
pos[:, :2] + pos[:, 2:],
pos[:, :2] + pos[:, 2:] * [0, 1],
], [1, 0, 2])
if not img:
under_ax.add_collection(collections.PolyCollection(
verts, facecolor=axis_facecolor, edgecolor=axis_spinecolor,
linewidth=1.)) # Not needed for image plots.
for ax in axs:
yield ax, ax._mne_ch_idx
def _plot_topo(info, times, show_func, click_func=None, layout=None,
vmin=None, vmax=None, ylim=None, colorbar=None, border='none',
axis_facecolor='k', fig_facecolor='k', cmap='RdBu_r',
layout_scale=None, title=None, x_label=None, y_label=None,
font_color='w', unified=False, img=False, axes=None):
"""Plot on sensor layout."""
import matplotlib.pyplot as plt
if layout.kind == 'custom':
layout = deepcopy(layout)
layout.pos[:, :2] -= layout.pos[:, :2].min(0)
layout.pos[:, :2] /= layout.pos[:, :2].max(0)
# prepare callbacks
tmin, tmax = times[0], times[-1]
click_func = show_func if click_func is None else click_func
on_pick = partial(click_func, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim, x_label=x_label,
y_label=y_label)
if axes is None:
fig = plt.figure()
axes = plt.axes([0.015, 0.025, 0.97, 0.95])
axes.set_facecolor(fig_facecolor)
else:
fig = axes.figure
if colorbar:
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin, vmax))
sm.set_array(np.linspace(vmin, vmax))
cb = fig.colorbar(sm, ax=axes, pad=0.025, fraction=0.075, shrink=0.5,
anchor=(-1, 0.5))
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cb_yticks, color=font_color)
axes.axis('off')
my_topo_plot = _iter_topography(info, layout=layout, on_pick=on_pick,
fig=fig, layout_scale=layout_scale,
axis_spinecolor=border,
axis_facecolor=axis_facecolor,
fig_facecolor=fig_facecolor,
unified=unified, img=img, axes=axes)
# Temporarily converting the ylim to a list to avoid zip object exhaustion
if ylim is not None:
ylim_list = [list(t) for t in zip(*ylim)]
else:
ylim_list = ylim
for ax, ch_idx in my_topo_plot:
if layout.kind == 'Vectorview-all' and ylim_list is not None:
this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
ylim = zip(*ylim_list)
ylim_ = [v[this_type] if _check_vlim(v) else v for v in ylim]
else:
ylim_ = ylim
show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim_)
if title is not None:
plt.figtext(0.03, 0.95, title, color=font_color, fontsize=15, va='top')
return fig
def _plot_topo_onpick(event, show_func):
"""Onpick callback that shows a single channel in a new figure."""
# make sure that the swipe gesture in OS-X doesn't open many figures
orig_ax = event.inaxes
import matplotlib.pyplot as plt
try:
if hasattr(orig_ax, '_mne_axs'): # in unified, single-axes mode
x, y = event.xdata, event.ydata
for ax in orig_ax._mne_axs:
if x >= ax.pos[0] and y >= ax.pos[1] and \
x <= ax.pos[0] + ax.pos[2] and \
y <= ax.pos[1] + ax.pos[3]:
orig_ax = ax
break
else:
# no axis found
return
elif not hasattr(orig_ax, '_mne_ch_idx'):
# neither old nor new mode
return
ch_idx = orig_ax._mne_ch_idx
face_color = orig_ax._mne_ax_face_color
fig, ax = plt.subplots(1)
plt.title(orig_ax._mne_ch_name)
ax.set_facecolor(face_color)
# allow custom function to override parameters
show_func(ax, ch_idx)
plt_show(fig=fig)
except Exception as err:
# matplotlib silently ignores exceptions in event handlers,
# so we print
# it here to know what went wrong
print(err)
raise
def _compute_ax_scalings(bn, xlim, ylim):
"""Compute scale factors for a unified plot."""
if isinstance(ylim[0], (tuple, list, np.ndarray)):
ylim = (ylim[0][0], ylim[1][0])
pos = bn.pos
bn.x_s = pos[2] / (xlim[1] - xlim[0])
bn.x_t = pos[0] - bn.x_s * xlim[0]
bn.y_s = pos[3] / (ylim[1] - ylim[0])
bn.y_t = pos[1] - bn.y_s * ylim[0]
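# Hedged worked example (comments only; the numbers are illustrative
# assumptions): with pos = [0.1, 0.2, 0.05, 0.04], xlim = (0., 1.) and
# ylim = (-5., 5.) the scalings above become
#   x_s = 0.05 / 1.0  = 0.05     x_t = 0.1 - 0.05  * 0.    = 0.1
#   y_s = 0.04 / 10.0 = 0.004    y_t = 0.2 - 0.004 * (-5.) = 0.22
# so the data point (t=1., y=5.) lands at (0.15, 0.24), i.e. the top-right
# corner of the small sensor rectangle in under-axes (normalized) coordinates.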
def _check_vlim(vlim):
"""Check the vlim."""
return not np.isscalar(vlim) and vlim is not None
def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, onselect, ylim=None,
tfr=None, freq=None, x_label=None, y_label=None,
colorbar=False, cmap=('RdBu_r', True), yscale='auto',
mask=None, mask_style="both", mask_cmap="Greys",
mask_alpha=0.1, is_jointplot=False):
"""Show time-frequency map as two-dimensional image."""
from matplotlib import pyplot as plt
from matplotlib.widgets import RectangleSelector
_check_option('yscale', yscale, ['auto', 'linear', 'log'])
cmap, interactive_cmap = cmap
times = np.linspace(tmin, tmax, num=tfr[ch_idx].shape[1])
img, t_end = _plot_masked_image(
ax, tfr[ch_idx], times, mask, yvals=freq, cmap=cmap,
vmin=vmin, vmax=vmax, mask_style=mask_style, mask_alpha=mask_alpha,
mask_cmap=mask_cmap, yscale=yscale)
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if colorbar:
if isinstance(colorbar, DraggableColorbar):
cbar = colorbar.cbar # this happens with multiaxes case
else:
cbar = plt.colorbar(mappable=img, ax=ax)
if interactive_cmap:
ax.CB = DraggableColorbar(cbar, img)
ax.RS = RectangleSelector(ax, onselect=onselect) # reference must be kept
return t_end
def _imshow_tfr_unified(bn, ch_idx, tmin, tmax, vmin, vmax, onselect,
ylim=None, tfr=None, freq=None, vline=None,
x_label=None, y_label=None, colorbar=False,
picker=True, cmap='RdBu_r', title=None, hline=None):
"""Show multiple tfrs on topo using a single axes."""
_compute_ax_scalings(bn, (tmin, tmax), (freq[0], freq[-1]))
ax = bn.ax
data_lines = bn.data_lines
extent = (bn.x_t + bn.x_s * tmin, bn.x_t + bn.x_s * tmax,
bn.y_t + bn.y_s * freq[0], bn.y_t + bn.y_s * freq[-1])
data_lines.append(ax.imshow(tfr[ch_idx], clip_on=True, clip_box=bn.pos,
extent=extent, aspect="auto", origin="lower",
vmin=vmin, vmax=vmax, cmap=cmap))
def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
times, vline=None, x_label=None, y_label=None,
colorbar=False, hline=None, hvline_color='w',
labels=None):
"""Show time series on topo split across multiple axes."""
import matplotlib.pyplot as plt
from matplotlib.colors import colorConverter
picker_flag = False
for data_, color_, times_ in zip(data, color, times):
if not picker_flag:
# use large tol for picker so we can click anywhere in the axes
line = ax.plot(times_, data_[ch_idx], color=color_, picker=True)[0]
line.set_pickradius(1e9)
picker_flag = True
else:
ax.plot(times_, data_[ch_idx], color=color_)
def _format_coord(x, y, labels, ax):
"""Create status string based on cursor coordinates."""
# find indices for datasets near cursor (if any)
tdiffs = [np.abs(tvec - x).min() for tvec in times]
nearby = [k for k, tdiff in enumerate(tdiffs) if
tdiff < (tmax - tmin) / 100]
xlabel = ax.get_xlabel()
xunit = (xlabel[xlabel.find('(') + 1:xlabel.find(')')]
if '(' in xlabel and ')' in xlabel else 's')
timestr = '%6.3f %s: ' % (x, xunit)
if not nearby:
return '%s Nothing here' % timestr
labels = [''] * len(nearby) if labels is None else labels
nearby_data = [(data[n], labels[n], times[n]) for n in nearby]
ylabel = ax.get_ylabel()
yunit = (ylabel[ylabel.find('(') + 1:ylabel.find(')')]
if '(' in ylabel and ')' in ylabel else '')
# try to estimate whether to truncate condition labels
slen = 9 + len(xunit) + sum([12 + len(yunit) + len(label)
for label in labels])
bar_width = (ax.figure.get_size_inches() * ax.figure.dpi)[0] / 5.5
# show labels and y values for datasets near cursor
trunc_labels = bar_width < slen
s = timestr
for data_, label, tvec in nearby_data:
idx = np.abs(tvec - x).argmin()
s += '%7.2f %s' % (data_[ch_idx, idx], yunit)
if trunc_labels:
label = (label if len(label) <= 10 else
'%s..%s' % (label[:6], label[-2:]))
s += ' [%s] ' % label if label else ' '
return s
ax.format_coord = lambda x, y: _format_coord(x, y, labels=labels, ax=ax)
def _cursor_vline(event):
"""Draw cursor (vertical line)."""
ax = event.inaxes
if not ax:
return
if ax._cursorline is not None:
ax._cursorline.remove()
ax._cursorline = ax.axvline(event.xdata, color=ax._cursorcolor)
ax.figure.canvas.draw()
def _rm_cursor(event):
ax = event.inaxes
if ax._cursorline is not None:
ax._cursorline.remove()
ax._cursorline = None
ax.figure.canvas.draw()
ax._cursorline = None
# choose cursor color based on perceived brightness of background
try:
facecol = colorConverter.to_rgb(ax.get_facecolor())
except AttributeError: # older MPL
facecol = colorConverter.to_rgb(ax.get_axis_bgcolor())
face_brightness = np.dot(facecol, np.array([299, 587, 114]))
ax._cursorcolor = 'white' if face_brightness < 150 else 'black'
plt.connect('motion_notify_event', _cursor_vline)
plt.connect('axes_leave_event', _rm_cursor)
ymin, ymax = ax.get_ylim()
# don't pass vline or hline here (this fxn doesn't do hvline_color):
_setup_ax_spines(ax, [], tmin, tmax, ymin, ymax, hline=False)
ax.figure.set_facecolor('k' if hvline_color == 'w' else 'w')
ax.spines['bottom'].set_color(hvline_color)
ax.spines['left'].set_color(hvline_color)
ax.tick_params(axis='x', colors=hvline_color, which='both')
ax.tick_params(axis='y', colors=hvline_color, which='both')
ax.title.set_color(hvline_color)
ax.xaxis.label.set_color(hvline_color)
ax.yaxis.label.set_color(hvline_color)
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
if isinstance(y_label, list):
ax.set_ylabel(y_label[ch_idx])
else:
ax.set_ylabel(y_label)
if vline:
plt.axvline(vline, color=hvline_color, linewidth=1.0,
linestyle='--')
if hline:
plt.axhline(hline, color=hvline_color, linewidth=1.0, zorder=10)
if colorbar:
plt.colorbar()
def _plot_timeseries_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim, data,
color, times, vline=None, x_label=None,
y_label=None, colorbar=False, hline=None,
hvline_color='w'):
"""Show multiple time series on topo using a single axes."""
import matplotlib.pyplot as plt
if not (ylim and not any(v is None for v in ylim)):
ylim = [min(np.min(d) for d in data), max(np.max(d) for d in data)]
# Translation and scale parameters to take data->under_ax normalized coords
_compute_ax_scalings(bn, (tmin, tmax), ylim)
pos = bn.pos
data_lines = bn.data_lines
ax = bn.ax
# XXX These calls could probably be made faster by using collections
for data_, color_, times_ in zip(data, color, times):
data_lines.append(ax.plot(
bn.x_t + bn.x_s * times_, bn.y_t + bn.y_s * data_[ch_idx],
linewidth=0.5, color=color_, clip_on=True, clip_box=pos)[0])
if vline:
vline = np.array(vline) * bn.x_s + bn.x_t
ax.vlines(vline, pos[1], pos[1] + pos[3], color=hvline_color,
linewidth=0.5, linestyle='--')
if hline:
hline = np.array(hline) * bn.y_s + bn.y_t
ax.hlines(hline, pos[0], pos[0] + pos[2], color=hvline_color,
linewidth=0.5)
if x_label is not None:
ax.text(pos[0] + pos[2] / 2., pos[1], x_label,
horizontalalignment='center', verticalalignment='top')
if y_label is not None:
y_label = y_label[ch_idx] if isinstance(y_label, list) else y_label
ax.text(pos[0], pos[1] + pos[3] / 2., y_label,
                horizontalalignment='right', verticalalignment='middle',
rotation=90)
if colorbar:
plt.colorbar()
def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, data=None,
epochs=None, sigma=None, order=None, scalings=None,
vline=None, x_label=None, y_label=None, colorbar=False,
cmap='RdBu_r', vlim_array=None):
"""Plot erfimage on sensor topography."""
from scipy import ndimage
import matplotlib.pyplot as plt
this_data = data[:, ch_idx, :]
if vlim_array is not None:
vmin, vmax = vlim_array[ch_idx]
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
img = ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax,
picker=True, cmap=cmap, interpolation='nearest')
ax = plt.gca()
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if colorbar:
plt.colorbar(mappable=img)
def _erfimage_imshow_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
data=None, epochs=None, sigma=None, order=None,
scalings=None, vline=None, x_label=None,
y_label=None, colorbar=False, cmap='RdBu_r',
vlim_array=None):
"""Plot erfimage topography using a single axis."""
from scipy import ndimage
_compute_ax_scalings(bn, (tmin, tmax), (0, len(epochs.events)))
ax = bn.ax
data_lines = bn.data_lines
extent = (bn.x_t + bn.x_s * tmin, bn.x_t + bn.x_s * tmax, bn.y_t,
bn.y_t + bn.y_s * len(epochs.events))
this_data = data[:, ch_idx, :]
vmin, vmax = (None, None) if vlim_array is None else vlim_array[ch_idx]
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
data_lines.append(ax.imshow(this_data, extent=extent, aspect='auto',
origin='lower', vmin=vmin, vmax=vmax,
picker=True, cmap=cmap,
interpolation='nearest'))
def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None,
proj=False, vline=(0.,), hline=(0.,), fig_facecolor='k',
fig_background=None, axis_facecolor='k', font_color='w',
merge_channels=False, legend=True, axes=None, show=True,
noise_cov=None):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
Matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots (after scaling has been applied). The value
determines the upper and lower subplot limits. e.g.
ylim = dict(eeg=[-20, 20]). Valid keys are eeg, mag, grad. If None,
the ylim parameter for each channel is determined by the maximum
absolute peak.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of floats | None
The values at which to show a vertical line.
hline : list of floats | None
The values at which to show a horizontal line.
fig_facecolor : color
The figure face color. Defaults to black.
fig_background : None | array
A background image for the figure. This must be a valid input to
`matplotlib.pyplot.imshow`. Defaults to None.
axis_facecolor : color
The face color to be used for each sensor plot. Defaults to black.
font_color : color
The color of text in the colorbar and title. Defaults to white.
merge_channels : bool
Whether to use RMS value of gradiometer pairs. Only works for Neuromag
data. Defaults to False.
legend : bool | int | string | tuple
If True, create a legend based on evoked.comment. If False, disable the
legend. Otherwise, the legend is created and the parameter value is
passed as the location parameter to the matplotlib legend call. It can
be an integer (e.g. 0 corresponds to upper right corner of the plot),
a string (e.g. 'upper right'), or a tuple (x, y coordinates of the
lower left corner of the legend in the axes coordinate system).
See matplotlib documentation for more details.
axes : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
show : bool
Show figure if True.
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channels names are shown in italic.
Can be a string to load a covariance from disk.
.. versionadded:: 0.16.0
Returns
-------
fig : instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
import matplotlib.pyplot as plt
from ..cov import whiten_evoked
if not type(evoked) in (tuple, list):
evoked = [evoked]
if type(color) in (tuple, list):
if len(color) != len(evoked):
raise ValueError('Lists of evoked objects and colors'
' must have the same length')
elif color is None:
        colors = ['w'] + _get_color_list()
stop = (slice(len(evoked)) if len(evoked) < len(colors)
else slice(len(colors)))
color = cycle(colors[stop])
if len(evoked) > len(colors):
warn('More evoked objects than colors available. You should pass '
'a list of unique colors.')
else:
color = cycle([color])
noise_cov = _check_cov(noise_cov, evoked[0].info)
if noise_cov is not None:
evoked = [whiten_evoked(e, noise_cov) for e in evoked]
else:
evoked = [e.copy() for e in evoked]
info = evoked[0].info
ch_names = evoked[0].ch_names
scalings = _handle_default('scalings', scalings)
if not all(e.ch_names == ch_names for e in evoked):
raise ValueError('All evoked.picks must be the same')
ch_names = _clean_names(ch_names)
if merge_channels:
picks = _pair_grad_sensors(info, topomap_coords=False)
chs = list()
for pick in picks[::2]:
ch = info['chs'][pick]
ch['ch_name'] = ch['ch_name'][:-1] + 'X'
chs.append(ch)
info['chs'] = chs
info['bads'] = list() # bads dropped on pair_grad_sensors
info._update_redundant()
info._check_consistency()
new_picks = list()
for e in evoked:
data, _ = _merge_ch_data(e.data[picks], 'grad', [])
if noise_cov is None:
data *= scalings['grad']
e.data = data
new_picks.append(range(len(data)))
picks = new_picks
types_used = ['grad']
unit = _handle_default('units')['grad'] if noise_cov is None else 'NA'
y_label = 'RMS amplitude (%s)' % unit
if layout is None:
layout = find_layout(info)
if not merge_channels:
# XXX. at the moment we are committed to 1- / 2-sensor-types layouts
chs_in_layout = set(layout.names) & set(ch_names)
types_used = {channel_type(info, ch_names.index(ch))
for ch in chs_in_layout}
# remove possible reference meg channels
types_used = set.difference(types_used, set('ref_meg'))
# one check for all vendors
meg_types = {'mag', 'grad'}
is_meg = len(set.intersection(types_used, meg_types)) > 0
nirs_types = {'hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od'}
is_nirs = len(set.intersection(types_used, nirs_types)) > 0
if is_meg:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
for kk in types_used]
elif is_nirs:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, fnirs=kk, ref_meg=False, exclude=[])
for kk in types_used]
else:
types_used_kwargs = {t: True for t in types_used}
picks = [pick_types(info, meg=False, exclude=[],
**types_used_kwargs)]
assert isinstance(picks, list) and len(types_used) == len(picks)
if noise_cov is None:
for e in evoked:
for pick, ch_type in zip(picks, types_used):
e.data[pick] *= scalings[ch_type]
if proj is True and all(e.proj is not True for e in evoked):
evoked = [e.apply_proj() for e in evoked]
elif proj == 'interactive': # let it fail early.
for e in evoked:
_check_delayed_ssp(e)
# Y labels for picked plots must be reconstructed
y_label = list()
for ch_idx in range(len(chs_in_layout)):
if noise_cov is None:
unit = _handle_default('units')[channel_type(info, ch_idx)]
else:
unit = 'NA'
y_label.append('Amplitude (%s)' % unit)
if ylim is None:
# find maxima over all evoked data for each channel pick
ymaxes = np.array([max(np.abs(e.data[t]).max() for e in evoked)
for t in picks])
ylim_ = (-ymaxes, ymaxes)
elif isinstance(ylim, dict):
ylim_ = _handle_default('ylim', ylim)
ylim_ = [ylim_[kk] for kk in types_used]
# extra unpack to avoid bug #1700
if len(ylim_) == 1:
ylim_ = ylim_[0]
else:
ylim_ = zip(*[np.array(yl) for yl in ylim_])
else:
raise TypeError('ylim must be None or a dict. Got %s.' % type(ylim))
data = [e.data for e in evoked]
comments = [e.comment for e in evoked]
times = [e.times for e in evoked]
show_func = partial(_plot_timeseries_unified, data=data, color=color,
times=times, vline=vline, hline=hline,
hvline_color=font_color)
click_func = partial(_plot_timeseries, data=data, color=color, times=times,
vline=vline, hline=hline, hvline_color=font_color,
labels=comments)
time_min = min([t[0] for t in times])
time_max = max([t[-1] for t in times])
fig = _plot_topo(info=info, times=[time_min, time_max],
show_func=show_func, click_func=click_func, layout=layout,
colorbar=False, ylim=ylim_, cmap=None,
layout_scale=layout_scale, border=border,
fig_facecolor=fig_facecolor, font_color=font_color,
axis_facecolor=axis_facecolor, title=title,
x_label='Time (s)', y_label=y_label, unified=True,
axes=axes)
add_background_image(fig, fig_background)
if legend is not False:
legend_loc = 0 if legend is True else legend
labels = [e.comment if e.comment else 'Unknown' for e in evoked]
legend = plt.legend(labels, loc=legend_loc,
prop={'size': 10})
legend.get_frame().set_facecolor(axis_facecolor)
txts = legend.get_texts()
for txt, col in zip(txts, color):
txt.set_color(col)
if proj == 'interactive':
for e in evoked:
_check_delayed_ssp(e)
params = dict(evokeds=evoked, times=times,
plot_update_proj_callback=_plot_update_evoked_topo_proj,
projs=evoked[0].info['projs'], fig=fig)
_draw_proj_checkbox(None, params)
plt_show(show)
return fig
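# Hedged usage note (comments only): the private helper above backs the topo
# plot of evoked responses; in user code it is normally reached through the
# Evoked method.  The file name below is a hypothetical placeholder.
#
#     import mne
#     evoked = mne.read_evokeds('sample-ave.fif', condition=0)
#     evoked.plot_topo()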
def _plot_update_evoked_topo_proj(params, bools):
"""Update topo sensor plots."""
evokeds = [e.copy() for e in params['evokeds']]
fig = params['fig']
projs = [proj for proj, b in zip(params['projs'], bools) if b]
params['proj_bools'] = bools
for e in evokeds:
e.add_proj(projs, remove_existing=True)
e.apply_proj()
# make sure to only modify the time courses, not the ticks
for ax in fig.axes[0]._mne_axs:
for line, evoked in zip(ax.data_lines, evokeds):
line.set_ydata(ax.y_t + ax.y_s * evoked.data[ax._mne_ch_idx])
fig.canvas.draw()
def plot_topo_image_epochs(epochs, layout=None, sigma=0., vmin=None,
vmax=None, colorbar=None, order=None, cmap='RdBu_r',
layout_scale=.95, title=None, scalings=None,
border='none', fig_facecolor='k',
fig_background=None, font_color='w', show=True):
"""Plot Event Related Potential / Fields image on topographies.
Parameters
----------
epochs : instance of :class:`~mne.Epochs`
The epochs.
layout : instance of Layout
System specific sensor positions.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
        the epoch axis of the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is µV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is µV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool | None
Whether to display a colorbar or not. If ``None`` a colorbar will be
shown only if all channels are of the same type. Defaults to ``None``.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times)).
cmap : colormap
Colors to be mapped to the values.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
``None``, defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
border : str
Matplotlib borders style to be used for each sensor plot.
fig_facecolor : color
The figure face color. Defaults to black.
fig_background : None | array
A background image for the figure. This must be a valid input to
:func:`matplotlib.pyplot.imshow`. Defaults to ``None``.
font_color : color
The color of tick labels in the colorbar. Defaults to white.
show : bool
Whether to show the figure. Defaults to ``True``.
Returns
-------
fig : instance of :class:`matplotlib.figure.Figure`
Figure distributing one image per channel across sensor topography.
Notes
-----
In an interactive Python session, this plot will be interactive; clicking
on a channel image will pop open a larger view of the image; this image
will always have a colorbar even when the topo plot does not (because it
shows multiple sensor types).
"""
scalings = _handle_default('scalings', scalings)
# make a copy because we discard non-data channels and scale the data
epochs = epochs.copy().load_data()
# use layout to subset channels present in epochs object
if layout is None:
layout = find_layout(epochs.info)
ch_names = set(layout.names) & set(epochs.ch_names)
idxs = [epochs.ch_names.index(ch_name) for ch_name in ch_names]
epochs = epochs.pick(idxs)
# get lists of channel type & scale coefficient
ch_types = epochs.get_channel_types()
scale_coeffs = [scalings.get(ch_type, 1) for ch_type in ch_types]
# scale the data
epochs._data *= np.array(scale_coeffs)[:, np.newaxis]
data = epochs.get_data()
# get vlims for each channel type
vlim_dict = dict()
for ch_type in set(ch_types):
this_data = data[:, np.where(np.array(ch_types) == ch_type)]
vlim_dict[ch_type] = _setup_vmin_vmax(this_data, vmin, vmax)
vlim_array = np.array([vlim_dict[ch_type] for ch_type in ch_types])
# only show colorbar if we have a single channel type
if colorbar is None:
colorbar = (len(set(ch_types)) == 1)
# if colorbar=True, we know we have only 1 channel type so all entries
# in vlim_array are the same, just take the first one
if colorbar and vmin is None and vmax is None:
vmin, vmax = vlim_array[0]
show_func = partial(_erfimage_imshow_unified, scalings=scale_coeffs,
order=order, data=data, epochs=epochs, sigma=sigma,
cmap=cmap, vlim_array=vlim_array)
erf_imshow = partial(_erfimage_imshow, scalings=scale_coeffs, order=order,
data=data, epochs=epochs, sigma=sigma, cmap=cmap,
vlim_array=vlim_array, colorbar=True)
fig = _plot_topo(info=epochs.info, times=epochs.times,
click_func=erf_imshow, show_func=show_func, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title,
fig_facecolor=fig_facecolor, font_color=font_color,
border=border, x_label='Time (s)', y_label='Epoch',
unified=True, img=True)
add_background_image(fig, fig_background)
plt_show(show)
return fig
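# Hedged usage sketch (comments only; the epochs file and the parameter values
# are assumptions for illustration, not shipped example code):
#
#     import mne
#     epochs = mne.read_epochs('sample-epo.fif')   # hypothetical file
#     plot_topo_image_epochs(epochs, sigma=0.5,
#                            fig_facecolor='w', font_color='k')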
| bsd-3-clause |
OshynSong/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW requires medium-size data downloading and processing
If the data has not already been downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
pnedunuri/scikit-learn | sklearn/linear_model/logistic.py | 57 | 65098 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (DataConversionWarning,
check_X_y, NotFittedError)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
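# Hedged illustration (not used by the library code): a tiny worked example of
# the helper above with hand-picked numbers, wrapped in a sketch function so it
# is never executed at import time.  Names and values are assumptions.
def _example_intercept_dot():
    """Illustrative sketch only: check _intercept_dot on a 2x2 toy problem."""
    X = np.array([[1., 2.], [3., 4.]])
    y = np.array([1., -1.])
    w = np.array([.5, -.5, 2.])  # last entry is treated as the intercept c
    w_out, c, yz = _intercept_dot(w, X, y)
    # X.dot(w_out) == [-0.5, -0.5], so z == [1.5, 1.5] and yz == [1.5, -1.5]
    assert np.allclose(yz, y * (X.dot(w_out) + c))
    return w_out, c, yz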
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
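# Hedged sanity-check sketch (illustrative only, never called by the library):
# the analytic gradient of _logistic_loss_and_grad can be compared against
# finite differences of _logistic_loss with scipy's check_grad.  The random
# data and the choice of alpha below are assumptions.
def _example_check_logistic_grad():
    """Illustrative sketch only: finite-difference check of the gradient."""
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.where(rng.randn(20) > 0, 1., -1.)
    w = rng.randn(4)  # 3 coefficients plus an intercept term
    err = optimize.check_grad(
        _logistic_loss,
        lambda w_, *args: _logistic_loss_and_grad(w_, *args)[1],
        w, X, y, 1.0)
    # err should be tiny (~1e-6) when loss and gradient are consistent.
    return err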
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
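# Hedged property sketch (illustrative only, never called by the library): the
# probabilities returned above come from a softmax, so every row of p sums to
# one regardless of the coefficients.  The synthetic data is an assumption.
def _example_multinomial_probabilities():
    """Illustrative sketch only: rows of p are valid probability vectors."""
    rng = np.random.RandomState(0)
    X = rng.randn(6, 3)
    Y = np.eye(4)[rng.randint(4, size=6)]   # one-hot targets, 4 classes
    w = rng.randn(4 * (3 + 1))              # layout with fitted intercepts
    sw = np.ones(6)
    loss, p, w_mat = _multinomial_loss(w, X, Y, alpha=1.0, sample_weight=sw)
    assert np.allclose(p.sum(axis=1), 1.0)
    return loss, p, w_mat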
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
def _check_solver_option(solver, multi_class, penalty, dual, sample_weight):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver in ['liblinear', 'sag']:
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
if solver == 'liblinear' and sample_weight is not None:
raise ValueError("Solver %s does not support "
"sample weights." % solver)
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=False,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
copy : bool, default False
Whether or not to produce a copy of the data. A copy is not required
anymore. This parameter is deprecated and will be removed in 0.19.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
    sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if copy:
warnings.warn("A copy is not required anymore. The 'copy' parameter "
"is deprecated and will be removed in 0.19.",
DeprecationWarning)
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual, sample_weight)
# Preprocessing.
if check_input or copy:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
    # the class_weights are assigned after masking the labels with an OvR.
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
warm_start_sag = {'coef': w0}
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
except:
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, 'log', 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum,
warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
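# Hedged usage sketch (illustrative only, never called by the library; the
# synthetic data and the grid of C values below are assumptions, not part of
# scikit-learn's own examples).
def _example_logistic_regression_path():
    """Illustrative sketch only: fit a small path of three C values."""
    rng = np.random.RandomState(0)
    X = rng.randn(50, 4)
    y = (X[:, 0] + 0.1 * rng.randn(50) > 0).astype(np.float64)
    coefs, Cs, n_iter = logistic_regression_path(
        X, y, Cs=[0.1, 1., 10.], fit_intercept=True, solver='lbfgs')
    # Three coefficient vectors, each of length n_features + 1 == 5.
    assert len(coefs) == 3 and coefs[0].shape == (5,)
    return coefs, Cs, n_iter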
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen on a logarithmic scale between 1e-4 and 1e4.
        If not provided, then a fixed set of values for Cs is used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solver.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual, sample_weight)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
        raise ValueError("multi_class should be either multinomial or ovr, "
                         "got %s" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
# To deal with object dtypes, we need to convert into an array of floats.
y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
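# Illustrative sketch only (added as a comment; not part of the original
# module): one way the private helper above could be exercised directly. The
# synthetic data and the 80/20 index split are assumptions for illustration.
#
#   import numpy as np
#   from sklearn.datasets import make_classification
#
#   X_demo, y_demo = make_classification(n_samples=100, n_features=5,
#                                        random_state=0)
#   train_idx, test_idx = np.arange(0, 80), np.arange(80, 100)
#   coefs, Cs, scores, n_iter = _log_reg_scoring_path(
#       X_demo, y_demo, train_idx, test_idx, pos_class=1, Cs=5,
#       solver='lbfgs')
#   # scores has shape (len(Cs),): one accuracy value per candidate C.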
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
max_iter : int
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg' and 'lbfgs' handle
multinomial loss; 'sag' and 'liblinear' are limited to
one-versus-rest schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
        the entire probability distribution. Works only for the 'lbfgs'
        and 'newton-cg' solvers.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual, sample_weight)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
self.n_iter_ = np.array([n_iter_])
return self
max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
else None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver, copy=False,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function,
        and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
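    # Worked illustration (comment added for clarity; not in the original
    # source): for a sample with three per-class decision values
    # d = [2.0, 0.0, -1.0], the two routes described above give roughly
    #   ovr:          sigmoid(d) = [0.881, 0.500, 0.269] -> normalised to
    #                 [0.534, 0.303, 0.163]
    #   multinomial:  softmax(d) = [0.844, 0.114, 0.042]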
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
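# A short usage sketch (added for illustration; not part of the original
# module). The data set and parameter values below are assumptions.
#
#   from sklearn.datasets import make_classification
#   X_demo, y_demo = make_classification(n_samples=200, n_features=10,
#                                        weights=[0.9, 0.1], random_state=0)
#   clf = LogisticRegression(C=1.0, class_weight='balanced',
#                            solver='liblinear').fit(X_demo, y_demo)
#   proba = clf.predict_proba(X_demo[:3])   # shape (3, 2); each row sums to 1
#
#   # With class_weight='balanced' and, say, 180 vs. 20 training samples, the
#   # weights n_samples / (n_classes * np.bincount(y)) work out to roughly
#   #   class 0: 200 / (2 * 180) ~ 0.56     class 1: 200 / (2 * 20) = 5.0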
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
we warm start along the path i.e guess the initial coefficients of the
present fit to be the coefficients got after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores got by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values are chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg' and 'lbfgs' handle
multinomial loss; 'sag' and 'liblinear' are limited to
one-versus-rest schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for 'lbfgs' and
'newton-cg' solvers.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when parameter ``fit_intercept`` is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual, sample_weight)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
else None
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in
['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty, copy=False,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
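# Illustrative sketch (added comment; not part of the original source) of
# fitting the cross-validated variant; grid size, cv and data are assumptions.
#
#   from sklearn.datasets import make_classification
#   X_demo, y_demo = make_classification(n_samples=300, random_state=0)
#   clf = LogisticRegressionCV(Cs=10, cv=5, solver='lbfgs').fit(X_demo, y_demo)
#   clf.C_        # best C per class (averaged over folds when refit=False)
#   clf.scores_   # dict: class label -> (n_folds, len(Cs_)) grid of scores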
| bsd-3-clause |
tjlaboss/openmc | tests/regression_tests/mgxs_library_distribcell/test.py | 7 | 2724 | import hashlib
import sys
import openmc
import openmc.mgxs
from openmc.examples import pwr_assembly
import pytest
from tests.testing_harness import PyAPITestHarness
class MGXSTestHarness(PyAPITestHarness):
def __init__(self, *args, **kwargs):
# Generate inputs using parent class routine
super().__init__(*args, **kwargs)
# Initialize a one-group structure
energy_groups = openmc.mgxs.EnergyGroups(group_edges=[0, 20.e6])
# Initialize MGXS Library for a few cross section types
# for one material-filled cell in the geometry
self.mgxs_lib = openmc.mgxs.Library(self._model.geometry)
self.mgxs_lib.by_nuclide = False
# Test all relevant MGXS types
relevant_MGXS_TYPES = [item for item in openmc.mgxs.MGXS_TYPES
if item != 'current']
self.mgxs_lib.mgxs_types = tuple(relevant_MGXS_TYPES) + \
openmc.mgxs.MDGXS_TYPES
self.mgxs_lib.energy_groups = energy_groups
self.mgxs_lib.num_delayed_groups = 6
self.mgxs_lib.legendre_order = 3
self.mgxs_lib.domain_type = 'distribcell'
cells = self.mgxs_lib.geometry.get_all_material_cells().values()
self.mgxs_lib.domains = [c for c in cells if c.name == 'fuel']
self.mgxs_lib.build_library()
# Add tallies
self.mgxs_lib.add_to_tallies_file(self._model.tallies, merge=False)
self._model.tallies.export_to_xml()
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Load the MGXS library from the statepoint
self.mgxs_lib.load_from_statepoint(sp)
# Average the MGXS across distribcell subdomains
avg_lib = self.mgxs_lib.get_subdomain_avg_library()
# Build a string from Pandas Dataframe for each 1-group MGXS
outstr = ''
for domain in avg_lib.domains:
for mgxs_type in avg_lib.mgxs_types:
mgxs = avg_lib.get_mgxs(domain, mgxs_type)
df = mgxs.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
@pytest.mark.xfail(sys.version_info < (3, 6),
reason="Pandas 1.0 API changed and requires Python 3.6+")
def test_mgxs_library_distribcell():
model = pwr_assembly()
harness = MGXSTestHarness('statepoint.10.h5', model)
harness.main()
| mit |
costypetrisor/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
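# Note added for clarity (not part of the original benchmark): with
# learning_rate="invscaling", SGDRegressor decays the step size as
# eta_t = eta0 / t**power_t, e.g. for eta0=0.01 and power_t=0.25 (the plain
# SGD run below):
#
#   eta0, power_t = 0.01, 0.25
#   for t in (1, 10, 100):
#       print(t, eta0 / t ** power_t)   # 0.01, ~0.0056, ~0.0032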
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
zoeyangyy/event-extraction | remote/gcn-train.py | 1 | 17360 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Time    : 2017/11/30 12:20 PM
# @Author  : Zoe
# @File    : wande-event.py
# @Description : 1. Split the train/test sets at year 2017 -- done
#                2. Batch sampling: training accuracy only gets high when the data is reshuffled every time???
#                3. Recall@10 / switch to coarse-grained event classes / testing accuracy = 0.581037
#                4. self-attention LSTM model
#                5. Learn the alpha weights based on event / position / time information respectively.
#                Still to change: 20 / LSTM
# TODO some baselines: n-gram language model / always predict the most frequent class / dataset analysis
# 21669/85882 = 0.2523
import json
import pickle
import datetime
import time
import collections
import numpy as np
import tensorflow as tf
import random
import matplotlib.pyplot as plt
import os,sys,getopt
import config
from sklearn import preprocessing
flags = tf.app.flags
FLAGS = flags.FLAGS
# dev or test dataset
opts, args = getopt.getopt(sys.argv[1:], "t:n:c:v:", ["type=","note=","cf=","cuda="])
trainType = 'event'
note = ''
classifier = 'mlp'
cuda = '1'
for op, value in opts:
if op == "--type":
trainType = value
if op == '--note':
note = value
if op == '--cf':
classifier = value
if op == '--cuda':
cuda = value
model_save_path = '../data/ckpt-gcn/'+trainType+classifier+note+'.ckpt'
os.environ["CUDA_VISIBLE_DEVICES"] = cuda
config = tf.ConfigProto(log_device_placement=True, allow_soft_placement=True)
config.gpu_options.per_process_gpu_memory_fraction = 0.9  # use at most 90% of GPU memory
config.gpu_options.allow_growth = True
# TODO: split the data by year (00, 17), .new dataset -- done
f_data = open('../data/pickle.data.5.train', 'rb')
x_mat_list = pickle.load(f_data)
y_tag_list = pickle.load(f_data)
f_data.close()
print('***DATA SHAPE***\n', x_mat_list.shape, y_tag_list.shape)
# shuffle x y
def shuffle_xy(x_mat_list, y_tag_list):
zip_list = list(zip(x_mat_list, y_tag_list))
random.shuffle(zip_list)
x_mat_list[:], y_tag_list[:] = zip(*zip_list)
return x_mat_list, y_tag_list
# TODO: tune parameter sizes -- done
training_iters = x_mat_list.shape[0] / FLAGS._batch_size
trainNum = 100000
x = tf.placeholder(tf.int32, [None, FLAGS.n_steps*2])
y = tf.placeholder(tf.int32, [None, FLAGS.n_classes])
output_kp = tf.placeholder(tf.float32, [])
# TODO: inspect how the parameters evolve during training -- done
weights = {
# (feature_dim,128)
'weight_add': tf.Variable(tf.random_normal([FLAGS.n_hidden_units, FLAGS.n_hidden_units])),
'baseline_gcn': tf.Variable(tf.random_normal([FLAGS.n_hidden_units + FLAGS.embedding_size, FLAGS.n_hidden_units])),
'gcn_2': tf.Variable(tf.random_normal([FLAGS.n_hidden_units, FLAGS.n_hidden_units])),
'gcn_3': tf.Variable(tf.random_normal([FLAGS.n_hidden_units, FLAGS.n_hidden_units])),
'attention': tf.Variable(tf.random_normal([FLAGS.n_hidden_units, FLAGS.n_hidden_units])),
'attention_2': tf.Variable(tf.random_normal([FLAGS.n_hidden_units, 1])),
# (128,n_classes)
'out': tf.Variable(tf.random_normal([FLAGS.n_hidden_units, FLAGS.n_classes])),
'out_gcn': tf.Variable(tf.random_normal([FLAGS.n_hidden_units, 1]))
}
biases = {
'l1': tf.Variable(tf.constant(0.1, shape=[FLAGS.n_hidden_units])),
'l2': tf.Variable(tf.constant(0.1, shape=[FLAGS.n_hidden_units])),
'attention': tf.Variable(tf.constant(0.1, shape=[FLAGS.n_hidden_units])),
# (n_classes)
'out': tf.Variable(tf.constant(0.1, shape=[FLAGS.n_classes])),
}
add_weights = {
'baseline': tf.Variable(tf.constant(0.25)),
'position': tf.Variable(tf.constant(0.25)),
'time': tf.Variable(tf.constant(0.25)),
'event': tf.Variable(tf.constant(0.25))
}
time_v = tf.get_variable('time', [4])
position = tf.get_variable('position', [5])
event = tf.get_variable('event', [FLAGS.n_classes, FLAGS.n_classes])
baseline_gcn = list()
for _ in range(FLAGS.n_classes):
baseline_gcn.append(tf.Variable(tf.random_normal([FLAGS.n_hidden_units +FLAGS.embedding_size, FLAGS.n_hidden_units])))
batchNum = 0
batch_xs = np.ones(shape=(FLAGS._batch_size, FLAGS.Chain_Lens*2)).astype(int)
batch_ys = np.ones(shape=(FLAGS._batch_size, FLAGS.Chain_Lens*2)).astype(int)
def next_batch():
global batchNum, x_mat_list, y_tag_list
if (batchNum + 1) * FLAGS._batch_size > x_mat_list.shape[0]:
x_mat_list, y_tag_list = shuffle_xy(x_mat_list, y_tag_list)
batchNum = 0
batch_x = x_mat_list[batchNum * FLAGS._batch_size: (batchNum + 1) * FLAGS._batch_size]
batch_y = y_tag_list[batchNum * FLAGS._batch_size: (batchNum + 1) * FLAGS._batch_size]
batchNum += 1
return batch_x, batch_y
# TODO: feature dimension -- concat instead of add, or use a weighted add -- done
def LSTM(X, weights, biases, time_v, position, event):
# hidden layer for input to cell
embedding = tf.get_variable("embedding", [FLAGS.vocab_size, FLAGS.embedding_size], dtype=tf.float32)
X_in = tf.nn.embedding_lookup(embedding, X[:, :FLAGS.Chain_Lens])
# => (64 batch, 128 hidden)
# cell
def unit_lstm():
fw_lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(FLAGS.n_hidden_units, forget_bias=1.0, state_is_tuple=True)
fw_lstm_cell = tf.nn.rnn_cell.DropoutWrapper(fw_lstm_cell, output_keep_prob=output_kp)
return fw_lstm_cell
fw_cell = tf.nn.rnn_cell.MultiRNNCell([unit_lstm() for i in range(3)], state_is_tuple=True)
fw_init_state = fw_cell.zero_state(FLAGS._batch_size, dtype=tf.float32)
outputs, states = tf.nn.dynamic_rnn(fw_cell, X_in, initial_state=fw_init_state, time_major=False)
    # TODO: should also average over the earlier hidden states -- done
# ********LSTM*******
tf_results = tf.constant(0.0001)
tf_baseline = tf.constant(0.0001)
for i in range(FLAGS.Chain_Lens):
        # batch_number * Chain_Lens * n_hidden_units => take the slice at step i of Chain_Lens
result_beta = tf.reshape(tf.slice(outputs, [0, i, 0], [-1, 1, -1]), [-1, FLAGS.n_hidden_units])
result_beta = result_beta * (1 / FLAGS.Chain_Lens)
tf_baseline = tf.add(tf_baseline, result_beta)
# tf_results = tf.add(tf_results, tf_baseline * add_weights['baseline'])
# tf_results = tf.add(tf_results, tf_baseline)
tf_results = tf.add(tf_results, tf_baseline * (1-add_weights['position']-add_weights['time']-add_weights['event']))
# tf_results = states[1]
# ********LSTM*******
if trainType == 'attention':
# ********attention*******
tf_attention = tf.constant(0.1, shape=[FLAGS._batch_size, 1])
for i in range(FLAGS.Chain_Lens):
result_beta = tf.reshape(tf.slice(outputs, [0, i, 0], [-1, 1, -1]), [-1, FLAGS.n_hidden_units])
result_beta = tf.nn.tanh(tf.matmul(result_beta, weights['attention']) + biases['attention'])
tf_attention = tf.concat([tf_attention, result_beta],1)
tf_attention = tf.reshape(tf.slice(tf_attention, [0, 1], [-1,-1]), [FLAGS._batch_size, FLAGS.Chain_Lens, -1])
tf_other = tf.constant(0.001, shape=[1])
for i in range(FLAGS._batch_size):
soft = tf.reshape(tf.nn.softmax(tf.squeeze(tf.matmul(tf_attention[i], weights['attention_2']))),[-1,1])
tf_other = tf.concat([tf_other, tf.reshape(tf.matmul(tf.transpose(outputs[i]), soft), [-1])], 0)
tf_other = tf.reshape(tf_other[1:], [FLAGS._batch_size, -1])
# ********attention*******
tf_results = tf.add(tf_results, tf_other)
    # TODO: try plain scalar values for the attention; compare baseline and position
if trainType == 'position' or trainType == 'all':
# ********position attention*******
tf_position = tf.constant(0.0001)
for i in range(FLAGS.Chain_Lens):
            # batch_number * Chain_Lens * n_hidden_units => take the slice at step i of Chain_Lens
result_beta = tf.reshape(tf.slice(outputs, [0, i, 0], [-1, 1, -1]), [-1, FLAGS.n_hidden_units])
result_beta = result_beta * position[i]
tf_position = tf.add(tf_position, result_beta)
# ********position attention*******
# tf_results = tf.add(tf_results, tf_position*(1-add_weights['baseline']))
tf_results = tf.add(tf_results, tf_position * add_weights['position'])
if trainType == 'time' or trainType == 'all':
# ********time attention*******
tf_time = tf.constant(0.0001)
for i in range(FLAGS.Chain_Lens):
            # batch_number * Chain_Lens * n_hidden_units => take the slice at step i of Chain_Lens
result_alpha = tf.reshape(tf.slice(outputs, [0, i, 0], [-1, 1, -1]), [-1, FLAGS.n_hidden_units])
result_sub = tf.constant(0.1, shape=[FLAGS.n_hidden_units])
for index in range(FLAGS._batch_size):
result_sub = tf.concat([result_sub, result_alpha[index] * time_v[X[index][i + 5]-1]], 0)
# batch_number * n_hidden_units
result_sub = tf.reshape(result_sub[FLAGS.n_hidden_units:], [FLAGS._batch_size, FLAGS.n_hidden_units])
tf_time = tf.add(tf_time, result_sub)
# ********time attention*******
# tf_results = tf.add(tf_results, tf_time*(1-add_weights['baseline']))
tf_results = tf.add(tf_results, tf_time * add_weights['time'])
    # TODO: self-attention only attends to preceding events -- done
if trainType == 'event' or trainType == 'all':
# ********event attention*******
tf_event = tf.constant(0.0001)
for i in range(FLAGS.Chain_Lens):
            # batch_number * Chain_Lens * n_hidden_units => take the slice at step i of Chain_Lens
result_event = tf.reshape(tf.slice(outputs, [0, i, 0], [-1, 1, -1]), [-1, FLAGS.n_hidden_units])
result_sub = tf.constant(0.0001, shape=[FLAGS.n_hidden_units])
for index in range(FLAGS._batch_size):
event_sum = tf.constant(0.0001)
for j in range(i):
                event_sum = tf.add(event_sum, event[X[index][i]][X[index][j]])
result_sub = tf.concat([result_sub, result_event[index] * event_sum],0)
# batch_number * n_hidden_units
result_sub = tf.reshape(result_sub[FLAGS.n_hidden_units:], [FLAGS._batch_size, FLAGS.n_hidden_units])
tf_event = tf.add(tf_event, result_sub)
# ********event attention*******
# tf_results = tf.add(tf_results, tf_event*(1-add_weights['baseline']))
tf_results = tf.add(tf_results, tf_event * add_weights['event'])
if classifier == 'mlp':
# mlp classifer
mlp_l1 = tf.matmul(tf_results, weights['weight_add']) + biases['l1']
mlp_l2 = tf.nn.relu(mlp_l1)
results = tf.matmul(mlp_l2, weights['out']) + biases['out']
# mlp classifer
    # TODO: label embedding -- use the 0/1 adjacency matrix, multi-layer GCN
# label embedding
label_embedding = tf.nn.embedding_lookup(embedding, [i for i in range(FLAGS.n_classes)])
    # # TODO: normalize the adjacency matrix, drop the 0/1 form -- done
# adjacency_mat = pickle.load(open('../data/adjacency.regular', 'rb'))
# hidden_label_em = tf.constant([0.1])
#
    # # TODO: multiply by another W
# for i in range(label_embedding.shape[0]):
# q = tf.constant(0.1, shape=[FLAGS.embedding_size])
# for j in range(label_embedding.shape[0]):
# if j == i:
# q = tf.add(q, label_embedding[j])
# else:
# q = tf.add(q, label_embedding[j] * adjacency_mat[i][j])
# hidden_label_em = tf.concat([hidden_label_em, q], 0)
# hidden_label_em = tf.reshape(hidden_label_em[1:], [FLAGS.n_classes, FLAGS.embedding_size])
# label embedding
    # TODO: final GCN/MLP part, big U of size 25*276 -- done
    # TODO: concatenate, then feed into the GCN as A x W
if classifier == 'gcn':
# gcn classifier
tf_sequence = tf.reshape(tf.tile(tf_results, [1, FLAGS.n_classes]), [FLAGS._batch_size * FLAGS.n_classes, -1])
tf_label = tf.tile(label_embedding, [FLAGS._batch_size, 1])
tf_concat = tf.reshape(tf.concat([tf_sequence, tf_label], 1), [FLAGS._batch_size, FLAGS.n_classes, -1])
# gcn_l1 = tf.reshape(tf.matmul(tf_concat, weights[trainType+'_gcn']),[_batch_size, n_classes, -1])+biases[trainType]
adjacency_mat = pickle.load(open('../data/adjacency.new', 'rb'))
myarray = np.zeros((25, 25), dtype='float32')
for key1, row in adjacency_mat.items():
for key2, value in row.items():
myarray[key1, key2] = value
# X_scaled = preprocessing.scale(myarray)
gcn_l1 = tf.constant(0.1, shape=[FLAGS.n_classes, FLAGS.n_hidden_units])
for i in range(FLAGS._batch_size):
# gcn_beta = tf.matmul(tf.matmul(X_scaled, tf_concat[i]), weights['baseline_gcn']) + biases['l1']
gcn_beta = tf.matmul(tf.matmul(myarray, tf_concat[i]), weights['baseline_gcn']) + biases['l1']
# gcn_beta = tf.nn.relu(gcn_beta)
# gcn_beta = tf.matmul(tf.matmul(myarray, gcn_beta), weights['gcn_2']) + biases['l2']
# gcn_beta = tf.matmul(tf.matmul(myarray, gcn_beta), weights['gcn_3'])
gcn_l1 = tf.concat([gcn_l1, gcn_beta], 0)
gcn_l1 = tf.reshape(gcn_l1[FLAGS.n_classes:], shape=[FLAGS._batch_size, FLAGS.n_classes, -1])
gcn_l2 = tf.nn.relu(gcn_l1)
results = tf.reshape(tf.matmul(tf.reshape(gcn_l2, [FLAGS._batch_size*FLAGS.n_classes,-1]), weights['out_gcn']),
[FLAGS._batch_size, FLAGS.n_classes]) + biases['out']
# gcn classifier
return results
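# Illustration only (comment added; not used by the script): per sample, the
# GCN branch above is a single propagation step H' = relu(A . H . W + b),
# where A is the 25x25 event-adjacency matrix and H stacks the sequence state
# with each label embedding. A tiny numpy sketch with made-up sizes:
#
#   import numpy as np
#   A = np.random.rand(25, 25)            # adjacency (loaded from pickle above)
#   H = np.random.rand(25, 128 + 64)      # [sequence state ; label embedding]
#   W = np.random.rand(128 + 64, 128)
#   H_next = np.maximum(0, A @ H @ W)     # relu(A H W), shape (25, 128)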
pred = LSTM(x, weights, biases, time_v, position, event)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
# predicted labels
pred_y = tf.cast(tf.argmax(pred, 1), tf.int32)
train_op = tf.train.AdamOptimizer(FLAGS.lr).minimize(cost)
k = 3  # whether the target's index is among the top-k largest predictions
output = tf.nn.in_top_k(pred, tf.argmax(y, 1), k)
accuracy = tf.reduce_mean(tf.cast(output, tf.float32))
# correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver(max_to_keep = 200)
def test_step(data_x, data_y):
# data_x, data_y = shuffle_xy(data_x, data_y)
data_y = np.reshape(data_y, [FLAGS._batch_size, -1])
test_accuracy, test_cost, pred = sess.run([accuracy, cost, pred_y], feed_dict={
x: data_x,
y: data_y,
output_kp: 1.0
})
return test_accuracy, test_cost
with tf.Session(config=config) as sess:
# training
sess.run(init)
epoch_i = 0
    # restore the latest checkpoint
# saver.restore(sess, '../data/ckpt-gcn/{}{}.ckpt-{}'.format(trainType, classifier, 13004))
print('***TRAINING PROCESS***')
with open('train_result.txt', 'a') as file:
file.write('\n{}__{}__{}__hidden_units:{}__lr:{}__batch:{}__embedding:{}__{}:\n'.format(trainType, classifier, note,
FLAGS.n_hidden_units, FLAGS.lr, FLAGS._batch_size, FLAGS.embedding_size, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
x_mat_list, y_tag_list = shuffle_xy(x_mat_list, y_tag_list)
while epoch_i < FLAGS.epoch:
# while epoch_i < 10:
step = 0
cost_trend = []
while step < training_iters:
batch_xs, batch_ys = next_batch()
batch_ys = np.reshape(batch_ys, [FLAGS._batch_size, -1])
_, total_cost = sess.run([train_op, cost], feed_dict={
x: batch_xs,
y: batch_ys,
output_kp: 0.8
})
cost_trend.append(total_cost)
if step % 1000 == 0:
train_accuracy = sess.run(accuracy, feed_dict={
x: batch_xs,
y: batch_ys,
output_kp: 0.8
})
print("{}_step = {}, total cost = {:.5f}, training accuracy = {:.5f}".format(time.strftime("%H:%M:%S", time.localtime()), step,total_cost.item(), train_accuracy.item()))
saver.save(sess, model_save_path, global_step=epoch_i+step)
step += 1
# saver.save(sess, model_save_path, global_step=epoch_i)
epoch_i += 1
# with open('cost_trend.txt', 'wb') as infile:
# pickle.dump(cost_trend, infile)
# testing
print ('***TRAINING RESULT***EPOCH={}***{}'.format(epoch_i, trainType))
x_mat_list, y_tag_list = shuffle_xy(x_mat_list, y_tag_list)
step = 0
test_accuracy, test_cost = 0.0, 0.0
while step < (trainNum / FLAGS._batch_size):
batch_xs, batch_ys = next_batch()
batch_accuracy, batch_cost = test_step(batch_xs, batch_ys)
test_accuracy += batch_accuracy
test_cost += batch_cost
step += 1
test_accuracy /= step
test_cost /= step
print ("training instance = %d, total cost = %g, training accuracy = %g" % (trainNum, test_cost, test_accuracy))
# file.write('***TRAINING RESULT***EPOCH='+str(epoch_i)+'\n')
# file.write("training instance = %d, total cost = %g, training accuracy = %g" %
# (trainNum, test_cost, test_accuracy)+'\n')
file.write("%g" % test_accuracy + '\n')
| mit |
juhuntenburg/pipelines | src/lsd_lemon/tsnr_2mni.py | 2 | 5624 | from nipype.pipeline.engine import Node, Workflow
import nipype.interfaces.utility as util
import nipype.interfaces.ants as ants
import nipype.interfaces.fsl as fsl
import nipype.interfaces.io as nio
import sys
import pandas as pd
'''
Project tsnr / brainmask from
individual structural to MNI152 2mm space
'''
#subject_list= sys.argv[1]
#with open(subject_list, 'r') as f:
# subjects = [line.strip() for line in f]
subjects = list(pd.read_csv('/home/raid3/huntenburg/workspace/lsd_data_paper/lsd_preproc.csv', dtype='str')['ID'])
subjects.sort()
subjects.remove('24945')
subjects.remove('25188')
subjects.remove('26500')
subjects.remove('25019')
subjects.remove('23700')
scans = ['rest1a', 'rest1b', 'rest2a', 'rest2b']
# for some subjects exclude scans
#subjects = ['24945']
#scans = ['rest1a']
#subjects = ['25188']
#scans = ['rest1a', 'rest1b']
#subjects = ['26500', '25019', '23700']
#scans = ['rest1a', 'rest1b', 'rest2a']
# local base and output directory
afs_dir = '/afs/cbs.mpg.de/projects/mar004_lsd-lemon-preproc/probands/'
base_dir = '/nobackup/ilz2/julia_2mni/working_dir/'
out_dir = '/nobackup/ilz2/julia_2mni/tsnr/'
ilz_dir = '/nobackup/ilz2/fix_mni/'
template ='/usr/share/fsl/data/standard/MNI152_T1_2mm_brain.nii.gz'
# workflow
mni = Workflow(name='mni')
mni.base_dir = base_dir
mni.config['execution']['crashdump_dir'] = mni.base_dir + "/crash_files"
# infosource to iterate over subjects
subject_infosource=Node(util.IdentityInterface(fields=['subject_id']),
name='subject_infosource')
subject_infosource.iterables=('subject_id', subjects)
# infosource to iterate over scans
scan_infosource=Node(util.IdentityInterface(fields=['scan']),
name='scan_infosource')
scan_infosource.iterables=('scan', scans)
# select files
templates_1={'tsnr': '{subject_id}/preprocessed/lsd_resting/{scan}/realign/*tsnr.nii.gz',
'anat_resamp' : '{subject_id}/preprocessed/lsd_resting/{scan}/coregister/T1_resampled.nii.gz',
#'affine': '{subject_id}/preprocessed/anat/transforms2mni/transform0GenericAffine.mat',
#'warp': '{subject_id}/preprocessed/anat/transforms2mni/transform1Warp.nii.gz',
'func_warp' : '{subject_id}/preprocessed/lsd_resting/{scan}/coregister/transforms2anat/fullwarpfield.nii.gz'
}
selectfiles_1 = Node(nio.SelectFiles(templates_1,
base_directory=afs_dir),
name="selectfiles_1")
mni.connect([(subject_infosource, selectfiles_1, [('subject_id', 'subject_id')]),
(scan_infosource, selectfiles_1, [('scan', 'scan')])])
# select files
templates_2={ 'affine': '{subject_id}/preprocessed/anat/transforms2mni/transform0GenericAffine.mat',
'warp': '{subject_id}/preprocessed/anat/transforms2mni/transform1Warp.nii.gz',
}
selectfiles_2 = Node(nio.SelectFiles(templates_2,
base_directory=ilz_dir),
name="selectfiles_2")
mni.connect([(subject_infosource, selectfiles_2, [('subject_id', 'subject_id')])])
# applymoco premat and fullwarpfield
applywarp = Node(fsl.ApplyWarp(interp='spline',
relwarp=True,
datatype='float'),
name='applywarp')
mni.connect([(selectfiles_1, applywarp, [('tsnr', 'in_file'),
('func_warp', 'field_file'),
('anat_resamp', 'ref_file')])
])
# make filelist
translist = Node(util.Merge(2),
name='translist')
mni.connect([(selectfiles_2, translist, [('affine', 'in2'),
('warp', 'in1')])])
def make_name(sub, scan):
return '%s_%s_tsnr_mni.nii.gz' %(sub, scan)
makename = Node(util.Function(input_names=['sub', 'scan'],
output_names='fname',
function=make_name),
name='makename')
mni.connect([(subject_infosource, makename, [('subject_id', 'sub')]),
(scan_infosource, makename, [('scan', 'scan')])])
# apply all transforms
applytransform = Node(ants.ApplyTransforms(input_image_type = 3,
#output_image='rest_preprocessed2mni.nii.gz',
interpolation = 'BSpline',
invert_transform_flags=[False, False]),
name='applytransform')
applytransform.inputs.reference_image=template
mni.connect([(applywarp, applytransform, [('out_file', 'input_image')]),
(translist, applytransform, [('out', 'transforms')]),
(makename, applytransform, [('fname', 'output_image')])
])
# tune down image to float
#changedt = Node(fsl.ChangeDataType(output_datatype='float',
# out_file='tsnr2mni.nii.gz'),
# name='changedt')
#changedt.plugin_args={'submit_specs': 'request_memory = 30000'}
#mni.connect([(applytransform, changedt, [('output_image', 'in_file')])])
# make base directory
# def makebase(subject_id, out_dir):
# return out_dir%subject_id
# sink
sink = Node(nio.DataSink(base_directory=out_dir,
parameterization=False),
name='sink')
mni.connect([#(subject_infosource, sink, [(('subject_id', makebase, out_dir), 'base_directory')]),
(applytransform, sink, [('output_image', '@tsnr2mni')])
])
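# Suggestion (not part of the original script): for debugging, the workflow
# can also be run serially instead of with MultiProc, e.g.
#   mni.run(plugin='Linear')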
mni.run(plugin='MultiProc', plugin_args={'n_procs' : 40})
| mit |
tkaitchuck/nupic | external/linux64/lib/python2.6/site-packages/matplotlib/scale.py | 69 | 13414 | import textwrap
import numpy as np
from numpy import ma
MaskedArray = ma.MaskedArray
from cbook import dedent
from ticker import NullFormatter, ScalarFormatter, LogFormatterMathtext, Formatter
from ticker import NullLocator, LogLocator, AutoLocator, SymmetricalLogLocator, FixedLocator
from transforms import Transform, IdentityTransform
class ScaleBase(object):
"""
The base class for all scales.
Scales are separable transformations, working on a single dimension.
Any subclasses will want to override:
- :attr:`name`
- :meth:`get_transform`
And optionally:
- :meth:`set_default_locators_and_formatters`
- :meth:`limit_range_for_scale`
"""
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` object
associated with this scale.
"""
raise NotImplementedError
def set_default_locators_and_formatters(self, axis):
"""
Set the :class:`~matplotlib.ticker.Locator` and
:class:`~matplotlib.ticker.Formatter` objects on the given
axis to match this scale.
"""
raise NotImplementedError
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Returns the range *vmin*, *vmax*, possibly limited to the
domain supported by this scale.
*minpos* should be the minimum positive value in the data.
This is used by log scales to determine a minimum value.
"""
return vmin, vmax
class LinearScale(ScaleBase):
"""
The default linear scale.
"""
name = 'linear'
def __init__(self, axis, **kwargs):
pass
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to reasonable defaults for
linear scaling.
"""
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
axis.set_minor_locator(NullLocator())
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
The transform for linear scaling is just the
:class:`~matplotlib.transforms.IdentityTransform`.
"""
return IdentityTransform()
def _mask_non_positives(a):
"""
Return a Numpy masked array where all non-positive values are
masked. If there are no non-positive values, the original array
is returned.
"""
mask = a <= 0.0
if mask.any():
return ma.MaskedArray(a, mask=mask)
return a
class LogScale(ScaleBase):
"""
A standard logarithmic scale. Care is taken so non-positive
values are not plotted.
For computational efficiency (to push as much as possible to Numpy
C code in the common cases), this scale provides different
transforms depending on the base of the logarithm:
- base 10 (:class:`Log10Transform`)
- base 2 (:class:`Log2Transform`)
- base e (:class:`NaturalLogTransform`)
- arbitrary base (:class:`LogTransform`)
"""
name = 'log'
class Log10Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 10.0
def transform(self, a):
a = _mask_non_positives(a * 10.0)
if isinstance(a, MaskedArray):
return ma.log10(a)
return np.log10(a)
def inverted(self):
return LogScale.InvertedLog10Transform()
class InvertedLog10Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 10.0
def transform(self, a):
return ma.power(10.0, a) / 10.0
def inverted(self):
return LogScale.Log10Transform()
class Log2Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 2.0
def transform(self, a):
a = _mask_non_positives(a * 2.0)
if isinstance(a, MaskedArray):
return ma.log(a) / np.log(2)
return np.log2(a)
def inverted(self):
return LogScale.InvertedLog2Transform()
class InvertedLog2Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 2.0
def transform(self, a):
return ma.power(2.0, a) / 2.0
def inverted(self):
return LogScale.Log2Transform()
class NaturalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = np.e
def transform(self, a):
a = _mask_non_positives(a * np.e)
if isinstance(a, MaskedArray):
return ma.log(a)
return np.log(a)
def inverted(self):
return LogScale.InvertedNaturalLogTransform()
class InvertedNaturalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = np.e
def transform(self, a):
return ma.power(np.e, a) / np.e
def inverted(self):
return LogScale.NaturalLogTransform()
class LogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base):
Transform.__init__(self)
self.base = base
def transform(self, a):
a = _mask_non_positives(a * self.base)
if isinstance(a, MaskedArray):
return ma.log(a) / np.log(self.base)
return np.log(a) / np.log(self.base)
def inverted(self):
return LogScale.InvertedLogTransform(self.base)
class InvertedLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base):
Transform.__init__(self)
self.base = base
def transform(self, a):
return ma.power(self.base, a) / self.base
def inverted(self):
return LogScale.LogTransform(self.base)
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]``
will place 10 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
subs = kwargs.pop('subsx', None)
else:
base = kwargs.pop('basey', 10.0)
subs = kwargs.pop('subsy', None)
if base == 10.0:
self._transform = self.Log10Transform()
elif base == 2.0:
self._transform = self.Log2Transform()
elif base == np.e:
self._transform = self.NaturalLogTransform()
else:
self._transform = self.LogTransform(base)
self.base = base
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
log scaling.
"""
axis.set_major_locator(LogLocator(self.base))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(LogLocator(self.base, self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`~matplotlib.transforms.Transform` instance
appropriate for the given logarithm base.
"""
return self._transform
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Limit the domain to positive values.
"""
return (vmin <= 0.0 and minpos or vmin,
vmax <= 0.0 and minpos or vmax)
class SymmetricalLogScale(ScaleBase):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
Since the values close to zero tend toward infinity, there is a
need to have a range around zero that is linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
"""
name = 'symlog'
class SymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base, linthresh):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self._log_base = np.log(base)
self._linadjust = (np.log(linthresh) / self._log_base) / linthresh
def transform(self, a):
a = np.asarray(a)
sign = np.sign(a)
masked = ma.masked_inside(a, -self.linthresh, self.linthresh, copy=False)
log = sign * ma.log(np.abs(masked)) / self._log_base
if masked.mask.any():
return np.asarray(ma.where(masked.mask,
a * self._linadjust,
log))
else:
return np.asarray(log)
def inverted(self):
return SymmetricalLogScale.InvertedSymmetricalLogTransform(self.base, self.linthresh)
class InvertedSymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base, linthresh):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self._log_base = np.log(base)
self._log_linthresh = np.log(linthresh) / self._log_base
self._linadjust = linthresh / (np.log(linthresh) / self._log_base)
def transform(self, a):
a = np.asarray(a)
return np.where(a <= self._log_linthresh,
np.where(a >= -self._log_linthresh,
a * self._linadjust,
-(np.power(self.base, -a))),
np.power(self.base, a))
def inverted(self):
return SymmetricalLogScale.SymmetricalLogTransform(self.base)
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*linthreshx*/*linthreshy*:
The range (-*x*, *x*) within which the plot is linear (to
avoid having the plot go to infinity around zero).
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]``
will place 10 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
linthresh = kwargs.pop('linthreshx', 2.0)
subs = kwargs.pop('subsx', None)
else:
base = kwargs.pop('basey', 10.0)
linthresh = kwargs.pop('linthreshy', 2.0)
subs = kwargs.pop('subsy', None)
self._transform = self.SymmetricalLogTransform(base, linthresh)
self.base = base
self.linthresh = linthresh
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
symmetrical log scaling.
"""
axis.set_major_locator(SymmetricalLogLocator(self.get_transform()))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(SymmetricalLogLocator(self.get_transform(), self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`SymmetricalLogTransform` instance.
"""
return self._transform
_scale_mapping = {
'linear' : LinearScale,
'log' : LogScale,
'symlog' : SymmetricalLogScale
}
def get_scale_names():
names = _scale_mapping.keys()
names.sort()
return names
def scale_factory(scale, axis, **kwargs):
"""
Return a scale class by name.
ACCEPTS: [ %(names)s ]
"""
    if scale is None:
        scale = 'linear'
    scale = scale.lower()
if scale not in _scale_mapping:
raise ValueError("Unknown scale type '%s'" % scale)
return _scale_mapping[scale](axis, **kwargs)
scale_factory.__doc__ = dedent(scale_factory.__doc__) % \
{'names': " | ".join(get_scale_names())}
def register_scale(scale_class):
"""
Register a new kind of scale.
*scale_class* must be a subclass of :class:`ScaleBase`.
"""
_scale_mapping[scale_class.name] = scale_class
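# Illustrative sketch (not part of the original module): a custom scale class
# that subclasses ScaleBase, sets ``name = 'mercator'`` and implements
# ``get_transform`` could be made available to set_xscale/set_yscale like this
# (the class name here is hypothetical):
#
#     register_scale(MercatorScale)
#     ax.set_yscale('mercator')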
def get_scale_docs():
"""
Helper function for generating docstrings related to scales.
"""
docs = []
for name in get_scale_names():
scale_class = _scale_mapping[name]
docs.append(" '%s'" % name)
docs.append("")
class_docs = dedent(scale_class.__init__.__doc__)
class_docs = "".join([" %s\n" %
x for x in class_docs.split("\n")])
docs.append(class_docs)
docs.append("")
return "\n".join(docs)
| gpl-3.0 |
alvarofierroclavero/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
TenninYan/Perceptron | ch2/knn_iris_plot.py | 2 | 1745 | #coding: utf-8
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn import datasets, neighbors
"""
PRML 2.5.2 Nearest neighbour methods
Plot the K-nearest-neighbour classification result for the iris data
"""
if __name__ == "__main__":
    # how many neighbouring points to look at?
K = 15
    # load the iris data
iris = datasets.load_iris()
    # the iris features are 4-dimensional, but only the first two are used
    # so that the data can be plotted on a plane
X = iris.data[:, :2]
y = iris.target
    # step size of the mesh
h = 0.02
    # create the color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
    # train the classifier
knn = neighbors.KNeighborsClassifier(K)
knn.fit(X, y)
    # determine the plot range
    # display each feature dimension from its minimum - 1 to its maximum + 1
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    # split the plane into a grid with step size h and get the classification result at each point
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
    # color-plot the classification result at each grid point
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # plot the training data
    # color each point according to its class
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.title("3-class classification (K = %i)" % K)
pl.show()
| mit |
yuealves/subset-visualization | code/s3.read_twenty_news.py | 1 | 1211 | import os
import random
from os.path import dirname
import pandas as pd
dataset_dir = os.path.join(dirname(dirname(__file__)), "datasets")
dataset_news_dir = os.path.join(dataset_dir, "twenty_newsgroups")
code_dir = dirname(__file__)
output_dir = os.path.join(code_dir, "lda_output")
trainSetPath = os.path.join(dataset_news_dir, "20news-bydate-train")
def read_twenty_news_corpus(directory=trainSetPath):
data = []
categories = os.listdir(directory)
for category in categories:
subdir = os.path.join(directory, category)
for filename in os.listdir(subdir):
data.append(dict(fname=filename, category=category))
df = pd.DataFrame(data)
def f(r):
return os.path.join(directory, r['category'], r['fname'])
df['fullpath'] = df.apply(f, axis=1)
df['content'] = df.apply(lambda r: open(r['fullpath'],
encoding="latin1").read(), axis=1)
return df
def select_subsets_randomly(dataframe, subsetSize):
corpusSize = len(dataframe)
selected = random.sample(range(corpusSize), subsetSize)
return dataframe.ix[selected]
df = read_twenty_news_corpus()
| mit |
somethingnew2-0/CS640-PA2 | bufferbloat/helper.py | 8 | 3406 | '''
Helper module for the plot scripts.
'''
import re
import itertools
import matplotlib as m
import os
if os.uname()[0] == "Darwin":
m.use("MacOSX")
else:
m.use("Agg")
import matplotlib.pyplot as plt
import argparse
import math
#import termcolor as T
def read_list(fname, delim=','):
lines = open(fname).xreadlines()
ret = []
for l in lines:
ls = l.strip().split(delim)
ls = map(lambda e: '0' if e.strip() == '' or e.strip() == 'ms' or e.strip() == 's' else e, ls)
ret.append(ls)
return ret
def ewma(alpha, values):
if alpha == 0:
return values
ret = []
prev = 0
for v in values:
prev = alpha * prev + (1 - alpha) * v
ret.append(prev)
return ret
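# Example (follows directly from the recurrence above):
#   ewma(0.5, [1, 1, 1]) -> [0.5, 0.75, 0.875]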
def col(n, obj = None, clean = lambda e: e):
"""A versatile column extractor.
col(n, [1,2,3]) => returns the nth value in the list
col(n, [ [...], [...], ... ] => returns the nth column in this matrix
col('blah', { ... }) => returns the blah-th value in the dict
col(n) => partial function, useful in maps
"""
if obj == None:
def f(item):
return clean(item[n])
return f
if type(obj) == type([]):
if len(obj) > 0 and (type(obj[0]) == type([]) or type(obj[0]) == type({})):
return map(col(n, clean=clean), obj)
if type(obj) == type([]) or type(obj) == type({}):
try:
return clean(obj[n])
except:
#print T.colored('col(...): column "%s" not found!' % (n), 'red')
return None
# We wouldn't know what to do here, so just return None
#print T.colored('col(...): column "%s" not found!' % (n), 'red')
return None
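# Example usage (illustrative, matching the docstring above; 'rtt' is a
# made-up field name):
#   col(1, [10, 20, 30])        -> 20
#   col(0, [[1, 2], [3, 4]])    -> [1, 3]
#   map(col('rtt', clean=float), rows)   # pull one field out of a list of dicts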
def transpose(l):
return zip(*l)
def avg(lst):
return sum(map(float, lst)) / len(lst)
def stdev(lst):
mean = avg(lst)
var = avg(map(lambda e: (e - mean)**2, lst))
return math.sqrt(var)
def xaxis(values, limit):
l = len(values)
return zip(*map(lambda (x,y): (x*1.0*limit/l, y), enumerate(values)))
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
def cdf(values):
values.sort()
prob = 0
l = len(values)
x, y = [], []
for v in values:
prob += 1.0 / l
x.append(v)
y.append(prob)
return (x, y)
def parse_cpu_usage(fname, nprocessors=8):
"""Returns (user,system,nice,iowait,hirq,sirq,steal) tuples
aggregated over all processors. DOES NOT RETURN IDLE times."""
data = grouper(nprocessors, open(fname).readlines())
"""Typical line looks like:
Cpu0 : 0.0%us, 1.0%sy, 0.0%ni, 97.0%id, 0.0%wa, 0.0%hi, 2.0%si, 0.0%st
"""
ret = []
for collection in data:
total = [0]*8
for cpu in collection:
usages = cpu.split(':')[1]
usages = map(lambda e: e.split('%')[0],
usages.split(','))
for i in xrange(len(usages)):
total[i] += float(usages[i])
total = map(lambda t: t/nprocessors, total)
# Skip idle time
ret.append(total[0:3] + total[4:])
return ret
def pc95(lst):
l = len(lst)
return sorted(lst)[ int(0.95 * l) ]
def pc99(lst):
l = len(lst)
return sorted(lst)[ int(0.99 * l) ]
def coeff_variation(lst):
return stdev(lst) / avg(lst)
| mit |
nsoojin/coursera-ml-py | machine-learning-ex8/ex8/ex8_cofi.py | 1 | 7267 | import matplotlib.pyplot as plt
import numpy as np
import scipy.io as scio
import scipy.optimize as opt
import cofiCostFunction as ccf
import checkCostFunction as cf
import loadMovieList as lm
import normalizeRatings as nr
plt.ion()
np.set_printoptions(formatter={'float': '{: 0.6f}'.format})
# ===================== Part 1: Loading movie ratings dataset =====================
# We will start by loading the movie ratings dataset to understand the
# structure of the data
print('Loading movie ratings dataset.')
# Load data
data = scio.loadmat('ex8_movies.mat')
Y = data['Y']
R = data['R']
# Y is a 1682 x 943 2-d ndarray, containing ratings 1-5 of 1682 movies on 943 users
#
# R is a 1682 x 943 2-d ndarray, where R[i, j] = 1 if and only if user j gave a
# rating to movie i
# From the matrix, we can compute statistics like average rating.
print('Average ratings for movie 0(Toy Story): {:0.6f}/5'.format(np.mean(Y[0, np.where(R[0] == 1)])))
# We can visualize the ratings matrix by plotting it with plt.imshow
plt.figure()
plt.imshow(Y)
plt.colorbar()
plt.xlabel('Users')
plt.ylabel('Movies')
input('Program paused. Press ENTER to continue')
# ===================== Part 2: Collaborative Filtering Cost function =====================
# You will now implement the cost function for collaborative filtering.
# To help you debug your cost function, we have included a set of weights
# that we trained on. Specifically, you should complete the code in
# cofiCostFunc.py to return cost.
#
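# For reference, the unregularized collaborative filtering cost is
#   J = 1/2 * sum over (i,j) with R[i,j]==1 of (theta[j].dot(X[i]) - Y[i,j])**2
# and the regularized version (Part 4) adds
#   lambda/2 * (sum(theta**2) + sum(X**2)).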
# Load pre-trained weights (X, theta, num_users, num_movies, num_features)
data = scio.loadmat('ex8_movieParams.mat')
X = data['X']
theta = data['Theta']
num_users = data['num_users']
num_movies = data['num_movies']
num_features = data['num_features']
# Reduce the data set size so that this runs faster
num_users = 4
num_movies = 5
num_features = 3
X = X[0:num_movies, 0:num_features]
theta = theta[0:num_users, 0:num_features]
Y = Y[0:num_movies, 0:num_users]
R = R[0:num_movies, 0:num_users]
# Evaluate cost function
cost, grad = ccf.cofi_cost_function(np.concatenate((X.flatten(), theta.flatten())), Y, R, num_users, num_movies, num_features, 0)
print('Cost at loaded parameters: {:0.2f}\n(this value should be about 22.22)'.format(cost))
input('Program paused. Press ENTER to continue')
# ===================== Part 3: Collaborative Filtering Gradient =====================
# Once your cost function matches up with ours, you should now implement
# the collaborative filtering gradient function. Specifically, you should
# complete the code in cofiCostFunction.py to return the grad argument.
#
print('Checking gradients (without regularization) ...')
# Check gradients by running check_cost_function()
cf.check_cost_function(0)
input('Program paused. Press ENTER to continue')
# ===================== Part 4: Collaborative Filtering Cost Regularization =====================
# Now, you should implement regularization for the cost function for
# collaborative filtering. You can implement it by adding the cost of
# regularization to the original cost computation.
#
# Evaluate cost function
cost, _ = ccf.cofi_cost_function(np.concatenate((X.flatten(), theta.flatten())), Y, R, num_users, num_movies, num_features, 1.5)
print('Cost at loaded parameters (lambda = 1.5): {:0.2f}\n'
'(this value should be about 31.34)'.format(cost))
input('Program paused. Press ENTER to continue')
# ===================== Part 5: Collaborative Filtering Gradient Regularization =====================
# Once your cost matches up with ours, you should proceed to implement
# regularization for the gradient.
#
print('Checking Gradients (with regularization) ...')
# Check gradients by running check_cost_function
cf.check_cost_function(1.5)
input('Program paused. Press ENTER to continue')
# ===================== Part 6: Entering ratings for a new user =====================
# Before we will train the collaborative filtering model, we will first
# add ratings that correspond to a new user that we just observed. This
# part of the code will also allow you to put in your own ratings for the
# movies in our dataset!
#
movie_list = lm.load_movie_list()
# Initialize my ratings
my_ratings = np.zeros(len(movie_list))
# Check the file movie_ids.txt for id of each movie in our dataset
# For example, Toy Story (1995) has ID 0, so to rate it "4", you can set
my_ratings[0] = 4
# Or suppose you did not enjoy Silence of the Lambs (1991), you can set
my_ratings[97] = 2
# We have selected a few movies we liked / did not like and the ratings we
# gave are as follows:
my_ratings[6] = 3
my_ratings[11] = 5
my_ratings[53] = 4
my_ratings[63] = 5
my_ratings[65] = 3
my_ratings[68] = 5
my_ratings[182] = 4
my_ratings[225] = 5
my_ratings[354] = 5
print('New user ratings:\n')
for i in range(my_ratings.size):
if my_ratings[i] > 0:
print('Rated {} for {}'.format(my_ratings[i], movie_list[i]))
input('Program paused. Press ENTER to continue')
# ===================== Part 7: Learning Movie Ratings =====================
# Now, you will train the collaborative filtering model on a movie rating
# dataset of 1682 movies and 943 users
#
print('Training collaborative filtering ...\n'
'(this may take 1 ~ 2 minutes)')
# Load data
data = scio.loadmat('ex8_movies.mat')
Y = data['Y']
R = data['R']
# Y is a 1682x943 matrix, containing ratings (1-5) of 1682 movies by
# 943 users
#
# R is a 1682x943 matrix, where R[i,j] = 1 if and only if user j gave a
# rating to movie i
# Add our own ratings to the data matrix
Y = np.c_[my_ratings, Y]
R = np.c_[(my_ratings != 0), R]
# Normalize Ratings
Ynorm, Ymean = nr.normalize_ratings(Y, R)
# Useful values
num_users = Y.shape[1]
num_movies = Y.shape[0]
num_features = 10
# Set initial parameters (theta, X)
X = np.random.randn(num_movies, num_features)
theta = np.random.randn(num_users, num_features)
initial_params = np.concatenate([X.flatten(), theta.flatten()])
lmd = 10
def cost_func(p):
return ccf.cofi_cost_function(p, Ynorm, R, num_users, num_movies, num_features, lmd)[0]
def grad_func(p):
return ccf.cofi_cost_function(p, Ynorm, R, num_users, num_movies, num_features, lmd)[1]
theta, *unused = opt.fmin_cg(cost_func, fprime=grad_func, x0=initial_params, maxiter=100, disp=False, full_output=True)
# Unfold the returned theta back into U and W
X = theta[0:num_movies * num_features].reshape((num_movies, num_features))
theta = theta[num_movies * num_features:].reshape((num_users, num_features))
print('Recommender system learning completed')
print(theta)
input('Program paused. Press ENTER to continue')
# ===================== Part 8: Recommendation for you =====================
# After training the model, you can now make recommendations by computing
# the predictions matrix.
#
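# Each entry of the predictions matrix is p[i, j] = X[i, :].dot(theta[j, :]),
# i.e. the predicted mean-normalized rating of movie i by user j; Ymean is
# added back below to undo the normalization before ranking movies.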
p = np.dot(X, theta.T)
my_predictions = p[:, 0] + Ymean
indices = np.argsort(my_predictions)[::-1]
print('\nTop recommendations for you:')
for i in range(10):
j = indices[i]
print('Predicting rating {:0.1f} for movie {}'.format(my_predictions[j], movie_list[j]))
print('\nOriginal ratings provided:')
for i in range(my_ratings.size):
if my_ratings[i] > 0:
print('Rated {} for {}'.format(my_ratings[i], movie_list[i]))
input('ex8_cofi Finished. Press ENTER to exit')
| mit |
dafx/aubio | python/demos/demo_tempo_plot.py | 5 | 2475 | #! /usr/bin/env python
import sys
from aubio import tempo, source
win_s = 512 # fft size
hop_s = win_s // 2 # hop size
if len(sys.argv) < 2:
print("Usage: %s <filename> [samplerate]" % sys.argv[0])
sys.exit(1)
filename = sys.argv[1]
samplerate = 0
if len( sys.argv ) > 2: samplerate = int(sys.argv[2])
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
o = tempo("default", win_s, hop_s, samplerate)
# tempo detection delay, in samples
# default to 4 blocks delay to catch up with
delay = 4. * hop_s
# list of beats, in samples
beats = []
# total number of frames read
total_frames = 0
while True:
samples, read = s()
is_beat = o(samples)
if is_beat:
this_beat = o.get_last_s()
beats.append(this_beat)
total_frames += read
if read < hop_s: break
if len(beats) > 1:
# do plotting
from numpy import mean, median, diff
import matplotlib.pyplot as plt
bpms = 60./ diff(beats)
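    # beats holds timestamps in seconds, so e.g. beats 0.5 s apart give 120 bpm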
print('mean period: %.2fbpm, median: %.2fbpm' % (mean(bpms), median(bpms)))
print('plotting %s' % filename)
plt1 = plt.axes([0.1, 0.75, 0.8, 0.19])
plt2 = plt.axes([0.1, 0.1, 0.8, 0.65], sharex = plt1)
plt.rc('lines',linewidth='.8')
for stamp in beats: plt1.plot([stamp, stamp], [-1., 1.], '-r')
plt1.axis(xmin = 0., xmax = total_frames / float(samplerate) )
plt1.xaxis.set_visible(False)
plt1.yaxis.set_visible(False)
# plot actual periods
plt2.plot(beats[1:], bpms, '-', label = 'raw')
# plot moving median of 5 last periods
median_win_s = 5
bpms_median = [ median(bpms[i:i + median_win_s:1]) for i in range(len(bpms) - median_win_s ) ]
plt2.plot(beats[median_win_s+1:], bpms_median, '-', label = 'median of %d' % median_win_s)
# plot moving median of 10 last periods
median_win_s = 20
bpms_median = [ median(bpms[i:i + median_win_s:1]) for i in range(len(bpms) - median_win_s ) ]
plt2.plot(beats[median_win_s+1:], bpms_median, '-', label = 'median of %d' % median_win_s)
plt2.axis(ymin = min(bpms), ymax = max(bpms))
#plt2.axis(ymin = 40, ymax = 240)
plt.xlabel('time (mm:ss)')
plt.ylabel('beats per minute (bpm)')
plt2.set_xticklabels([ "%02d:%02d" % (t/60, t%60) for t in plt2.get_xticks()[:-1]], rotation = 50)
#plt.savefig('/tmp/t.png', dpi=200)
plt2.legend()
plt.show()
else:
print('mean period: %.2fbpm, median: %.2fbpm' % (0, 0))
print('plotting %s' % filename)
| gpl-3.0 |
subodhchhabra/pandashells | pandashells/test/p_rand_tests.py | 10 | 3365 | #! /usr/bin/env python
from mock import patch, MagicMock
from unittest import TestCase
from pandashells.bin.p_rand import fill_default_mu, main
class TestFillDefaultMu(TestCase):
def test_normal_mu_default(self):
args = MagicMock(type=['normal'], mu=None)
fill_default_mu(args)
self.assertEqual(args.mu, [0.])
def test_normal_mu_non_default(self):
args = MagicMock(type=['normal'], mu=[7.])
fill_default_mu(args)
self.assertEqual(args.mu, [7.])
def test_poisson_mu_default(self):
args = MagicMock(type=['poisson'], mu=None)
fill_default_mu(args)
self.assertEqual(args.mu, [1.])
def test_poisson_mu_non_default(self):
args = MagicMock(type=['normal'], mu=[7.])
fill_default_mu(args)
self.assertEqual(args.mu, [7.])
class TestMain(TestCase):
@patch(
'pandashells.bin.p_rand.sys.argv',
'p.rand -n 10 -t uniform'.split())
@patch('pandashells.bin.p_rand.io_lib.df_to_output')
def test_uniform(self, df_to_output_mock):
main()
df = df_to_output_mock.call_args_list[0][0][1]
self.assertEqual(len(df), 10)
self.assertTrue(all([x <= 1 for x in df.c0]))
self.assertTrue(all([x >= 0 for x in df.c0]))
@patch(
'pandashells.bin.p_rand.sys.argv',
'p.rand -n 10 -t normal'.split())
@patch('pandashells.bin.p_rand.io_lib.df_to_output')
def test_normal(self, df_to_output_mock):
main()
df = df_to_output_mock.call_args_list[0][0][1]
self.assertEqual(len(df), 10)
self.assertTrue(df.c0.mean() < 100)
self.assertTrue(df.c0.mean() > -100)
@patch(
'pandashells.bin.p_rand.sys.argv',
'p.rand -n 10 -t poisson'.split())
@patch('pandashells.bin.p_rand.io_lib.df_to_output')
def test_poisson(self, df_to_output_mock):
main()
df = df_to_output_mock.call_args_list[0][0][1]
self.assertEqual(len(df), 10)
self.assertTrue(all([round(x, 0) == x for x in df.c0]))
self.assertTrue(all([x >= 0 for x in df.c0]))
@patch(
'pandashells.bin.p_rand.sys.argv',
'p.rand -n 10 -t beta'.split())
@patch('pandashells.bin.p_rand.io_lib.df_to_output')
def test_beta(self, df_to_output_mock):
main()
df = df_to_output_mock.call_args_list[0][0][1]
self.assertEqual(len(df), 10)
self.assertTrue(all([x >= 0 for x in df.c0]))
self.assertTrue(all([x <= 1 for x in df.c0]))
@patch(
'pandashells.bin.p_rand.sys.argv',
'p.rand -n 10 -t gamma'.split())
@patch('pandashells.bin.p_rand.io_lib.df_to_output')
def test_gamma(self, df_to_output_mock):
main()
df = df_to_output_mock.call_args_list[0][0][1]
self.assertEqual(len(df), 10)
self.assertTrue(all([x >= 0 for x in df.c0]))
@patch(
'pandashells.bin.p_rand.sys.argv',
'p.rand -n 10 -t binomial'.split())
@patch('pandashells.bin.p_rand.io_lib.df_to_output')
def test_binomial(self, df_to_output_mock):
main()
df = df_to_output_mock.call_args_list[0][0][1]
self.assertEqual(len(df), 10)
self.assertTrue(all([round(x, 0) == x for x in df.c0]))
self.assertTrue(all([x >= 0 for x in df.c0]))
self.assertTrue(all([x <= 10 for x in df.c0]))
| bsd-2-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/userdemo/annotate_simple02.py | 1 | 1228 | """
=================
Annotate Simple02
=================
"""
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
    W = 800
    HEIGHT = 600
    inset = 20
    size(W, HEIGHT)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
fig, ax = plt.subplots(figsize=(3, 3))
ax.annotate("Test",
xy=(0.2, 0.2), xycoords='data',
xytext=(0.8, 0.8), textcoords='data',
size=20, va="center", ha="center",
arrowprops=dict(arrowstyle="simple",
connectionstyle="arc3,rad=-0.2"),
)
pltshow(plt)
| mit |
ldamewood/kaggle | revenue/linear.py | 1 | 3314 | # -*- coding: utf-8 -*-
import numpy as np
import progressbar
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.svm import SVR
from revenue import RevenueCompetition, RevenueTransform
if __name__ == '__main__':
train_size = 0.75
cls = RandomForestClassifier()
reg = RandomForestRegressor(n_estimators=20, max_features=5, max_depth=None,
min_samples_split=2, min_samples_leaf=1,
max_leaf_nodes=None, bootstrap=True,
oob_score=False, n_jobs=-1)
reg = SVR(C=10., gamma=0.1)
train_df_orig = RevenueCompetition.load_data()
y = train_df_orig['revenue'].values
del train_df_orig['revenue']
test_df_orig = RevenueCompetition.load_data(train=False)
full_df = train_df_orig.append(test_df_orig)
print("Transforming...")
tr = RevenueTransform(rescale=True)
tr.fit(full_df)
X = tr.transform(train_df_orig).values
print('Classify the outliers...')
ly = np.log(y)
ym = ly.mean()
ys = ly.std()
s = np.empty(ly.shape[0])
s[(ly-ym)/ys <= -2] = 0
s[np.logical_and((ly-ym)/ys > -2,(ly-ym)/ys <= -1)] = 1
s[np.logical_and((ly-ym)/ys > -1,(ly-ym)/ys <= 1)] = 2
s[np.logical_and((ly-ym)/ys > 1,(ly-ym)/ys <= 2)] = 3
s[(ly-ym)/ys > 2] = 4
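    # i.e. s bins each sample by the z-score of its log-revenue
    # (0: z <= -2, 1: -2 < z <= -1, 2: |z| <= 1, 3: 1 < z <= 2, 4: z > 2);
    # these bands are only used to stratify the train/validation split below.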
train_index, valid_index = list(StratifiedShuffleSplit(s, n_iter=1, train_size=train_size, random_state=0))[0]
X_train, X_valid = X[train_index], X[valid_index]
y_train, y_valid = y[train_index], y[valid_index]
s_train, s_valid = s[train_index], s[valid_index]
assert (np.unique(s_valid) == np.unique(s_train)).all()
print("Train outlier model...")
cls.fit(X_train,s_train)
#
print("Appending outlier predictions...")
train_df = train_df_orig.copy()
predstd = cls.predict(tr.transform(train_df_orig).values)
# train_df['prob0'] = predstd == 0
# train_df['prob1'] = predstd == 1
# train_df['prob2'] = predstd == 2
# train_df['prob3'] = predstd == 3
train_df['prob4'] = predstd == 4
X = tr.transform(train_df).values
print("Training regression model...")
reg.fit(X_train, np.log(y_train))
print("Validating regression model...")
p = progressbar.ProgressBar(maxval=len(y_valid)).start()
mse = []
for i in range(len(y_valid)):
X_sample = X_valid[i]
y_sample = y_valid[i]
y_pred = np.exp(reg.predict([X_sample]))
mse.append(np.sqrt(np.mean((y_pred - y_sample)**2)))
p.update(i)
p.update(len(y_valid))
print('')
print("Regression mse:")
print(np.mean(mse), np.std(mse)/np.sqrt(len(y_valid)))
print('Fit with all data')
reg.fit(X,np.log(y))
print('Transform test set...')
test_df = test_df_orig.copy()
predstd = cls.predict(tr.transform(test_df_orig).values)
# test_df['prob0'] = predstd == 0
# test_df['prob1'] = predstd == 1
# test_df['prob2'] = predstd == 2
# test_df['prob3'] = predstd == 3
test_df['prob4'] = predstd == 4
X = tr.transform(test_df).values
print('Predict test set...')
yp = np.exp(reg.predict(X))
RevenueCompetition.save_data(yp, 'data/revenue_20150431_03.csv') | mit |
mmottahedi/nilmtk | nilmtk/disaggregate/combinatorial_optimisation.py | 5 | 12640 | from __future__ import print_function, division
from datetime import datetime
import pandas as pd
import numpy as np
from ..utils import find_nearest
from ..feature_detectors import cluster
from ..timeframe import merge_timeframes, TimeFrame
from nilmtk.exceptions import VampirePowerAlreadyInModelError
# Fix the seed for repeatability of experiments
SEED = 42
np.random.seed(SEED)
class CombinatorialOptimisation(object):
"""1 dimensional combinatorial optimisation NILM algorithm.
Attributes
----------
model : list of dicts
Each dict has these keys:
states : list of ints (the power (Watts) used in different states)
training_metadata : ElecMeter or MeterGroup object used for training
this set of states. We need this information because we
need the appliance type (and perhaps some other metadata)
for each model.
"""
def __init__(self):
self.model = []
self.state_combinations = None
self.MIN_CHUNK_LENGTH = 100
def train(self, metergroup, num_states_dict={}, **load_kwargs):
"""Train using 1D CO. Places the learnt model in the `model` attribute.
Parameters
----------
metergroup : a nilmtk.MeterGroup object
Notes
-----
* only uses first chunk for each meter (TODO: handle all chunks).
"""
if self.model:
raise RuntimeError(
"This implementation of Combinatorial Optimisation"
" does not support multiple calls to `train`.")
num_meters = len(metergroup.meters)
if num_meters > 12:
max_num_clusters = 2
else:
max_num_clusters = 3
for i, meter in enumerate(metergroup.submeters().meters):
print("Training model for submeter '{}'".format(meter))
for chunk in meter.power_series(**load_kwargs):
num_total_states = num_states_dict.get(meter)
if num_total_states is not None:
num_on_states = num_total_states - 1
else:
num_on_states = None
states = cluster(chunk, max_num_clusters, num_on_states)
self.model.append({
'states': states,
'training_metadata': meter})
break # TODO handle multiple chunks per appliance
# Get centroids
# If we import sklearn at the top of the file then auto doc fails.
from sklearn.utils.extmath import cartesian
centroids = [model['states'] for model in self.model]
self.state_combinations = cartesian(centroids)
# self.state_combinations is a 2D array
        # each column is a channel
# each row is a possible combination of power demand values e.g.
# [[0, 0, 0, 0], [0, 0, 0, 100], [0, 0, 50, 0],
# [0, 0, 50, 100], ...]
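        # For instance (illustrative numbers), two appliances with learnt
        # states [0, 100] and [0, 50, 200] would give the six rows
        # [0, 0], [0, 50], [0, 200], [100, 0], [100, 50], [100, 200].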
print("Done training!")
def disaggregate(self, mains, output_datastore, **load_kwargs):
'''Disaggregate mains according to the model learnt previously.
Parameters
----------
mains : nilmtk.ElecMeter or nilmtk.MeterGroup
output_datastore : instance of nilmtk.DataStore subclass
For storing power predictions from disaggregation algorithm.
output_name : string, optional
The `name` to use in the metadata for the `output_datastore`.
e.g. some sort of name for this experiment. Defaults to
"NILMTK_CO_<date>"
resample_seconds : number, optional
The desired sample period in seconds.
**load_kwargs : key word arguments
Passed to `mains.power_series(**kwargs)`
'''
# Vampire power
vampire_power = mains.vampire_power()
# Extract optional parameters from load_kwargs
date_now = datetime.now().isoformat().split('.')[0]
output_name = load_kwargs.pop('output_name', 'NILMTK_CO_' + date_now)
resample_seconds = load_kwargs.pop('resample_seconds', 60)
load_kwargs['sections'] = load_kwargs.pop(
'sections', mains.good_sections())
load_kwargs.setdefault('resample', True)
load_kwargs.setdefault('sample_period', resample_seconds)
timeframes = []
building_path = '/building{}'.format(mains.building())
mains_data_location = '{}/elec/meter1'.format(building_path)
data_is_available = False
for chunk in mains.power_series(**load_kwargs):
# Check that chunk is sensible size before resampling
if len(chunk) < self.MIN_CHUNK_LENGTH:
continue
# Record metadata
timeframes.append(chunk.timeframe)
measurement = chunk.name
appliance_powers = self.disaggregate_chunk(chunk, vampire_power)
for i, model in enumerate(self.model):
appliance_power = appliance_powers[i]
data_is_available = True
cols = pd.MultiIndex.from_tuples([chunk.name])
meter_instance = model['training_metadata'].instance()
df = pd.DataFrame(
appliance_power.values, index=appliance_power.index,
columns=cols)
key = '{}/elec/meter{}'.format(building_path, meter_instance)
output_datastore.append(key, df)
# Copy mains data to disag output
output_datastore.append(key=mains_data_location,
value=pd.DataFrame(chunk, columns=cols))
if not data_is_available:
return
##################################
# Add metadata to output_datastore
# TODO: `preprocessing_applied` for all meters
# TODO: split this metadata code into a separate function
# TODO: submeter measurement should probably be the mains
# measurement we used to train on, not the mains measurement.
# DataSet and MeterDevice metadata:
meter_devices = {
'CO': {
'model': 'CO',
'sample_period': resample_seconds,
'max_sample_period': resample_seconds,
'measurements': [{
'physical_quantity': measurement[0],
'type': measurement[1]
}]
},
'mains': {
'model': 'mains',
'sample_period': resample_seconds,
'max_sample_period': resample_seconds,
'measurements': [{
'physical_quantity': measurement[0],
'type': measurement[1]
}]
}
}
merged_timeframes = merge_timeframes(timeframes, gap=resample_seconds)
total_timeframe = TimeFrame(merged_timeframes[0].start,
merged_timeframes[-1].end)
dataset_metadata = {'name': output_name, 'date': date_now,
'meter_devices': meter_devices,
'timeframe': total_timeframe.to_dict()}
output_datastore.save_metadata('/', dataset_metadata)
# Building metadata
# Mains meter:
elec_meters = {
1: {
'device_model': 'mains',
'site_meter': True,
'data_location': mains_data_location,
'preprocessing_applied': {}, # TODO
'statistics': {
'timeframe': total_timeframe.to_dict()
}
}
}
# Appliances and submeters:
appliances = []
for model in self.model:
meter = model['training_metadata']
meter_instance = meter.instance()
for app in meter.appliances:
appliance = {
'meters': [meter_instance],
'type': app.identifier.type,
'instance': app.identifier.instance
# TODO this `instance` will only be correct when the
# model is trained on the same house as it is tested on.
# https://github.com/nilmtk/nilmtk/issues/194
}
appliances.append(appliance)
elec_meters.update({
meter_instance: {
'device_model': 'CO',
'submeter_of': 1,
'data_location': ('{}/elec/meter{}'
.format(building_path, meter_instance)),
'preprocessing_applied': {}, # TODO
'statistics': {
'timeframe': total_timeframe.to_dict()
}
}
})
# Setting the name if it exists
if meter.name:
if len(meter.name) > 0:
elec_meters[meter_instance]['name'] = meter.name
building_metadata = {
'instance': mains.building(),
'elec_meters': elec_meters,
'appliances': appliances
}
output_datastore.save_metadata(building_path, building_metadata)
# TODO: fix export and import!
# https://github.com/nilmtk/nilmtk/issues/193
#
# def export_model(self, filename):
# model_copy = {}
# for appliance, appliance_states in self.model.iteritems():
# model_copy[
# "{}_{}".format(appliance.name, appliance.instance)] = appliance_states
# j = json.dumps(model_copy)
# with open(filename, 'w+') as f:
# f.write(j)
# def import_model(self, filename):
# with open(filename, 'r') as f:
# temp = json.loads(f.read())
# for appliance, centroids in temp.iteritems():
# appliance_name = appliance.split("_")[0].encode("ascii")
# appliance_instance = int(appliance.split("_")[1])
# appliance_name_instance = ApplianceID(
# appliance_name, appliance_instance)
# self.model[appliance_name_instance] = centroids
def disaggregate_chunk(self, mains, vampire_power=None):
"""In-memory disaggregation.
Parameters
----------
mains : pd.DataFrame
Returns
-------
appliance_powers : pd.DataFrame where each column represents a
disaggregated appliance. Column names are the integer index
into `self.model` for the appliance in question.
"""
if not self.model:
raise RuntimeError(
"The model needs to be instantiated before"
" calling `disaggregate`. The model"
" can be instantiated by running `train`.")
if len(mains) < self.MIN_CHUNK_LENGTH:
raise RuntimeError("Chunk is too short.")
# sklearn produces lots of DepreciationWarnings with PyTables
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Add vampire power to the model
if vampire_power is None:
vampire_power = mains.values.min()
if vampire_power > 0:
print("Including vampire_power = {} watts to model..."
.format(vampire_power))
n_rows = self.state_combinations.shape[0]
vampire_power_array = np.zeros((n_rows, 1)) + vampire_power
state_combinations = np.hstack(
(self.state_combinations, vampire_power_array))
else:
state_combinations = self.state_combinations
summed_power_of_each_combination = np.sum(state_combinations, axis=1)
# summed_power_of_each_combination is now an array where each
# value is the total power demand for each combination of states.
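        # Continuing the illustrative two-appliance example from train(),
        # rows [0, 0], [0, 50], ..., [100, 200] would sum to
        # [0, 50, 200, 100, 150, 300]; each mains sample is matched to the
        # nearest of these totals by find_nearest() below.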
# Start disaggregation
indices_of_state_combinations, residual_power = find_nearest(
summed_power_of_each_combination, mains.values)
appliance_powers_dict = {}
for i, model in enumerate(self.model):
print("Estimating power demand for '{}'"
.format(model['training_metadata']))
predicted_power = state_combinations[
indices_of_state_combinations, i].flatten()
column = pd.Series(predicted_power, index=mains.index, name=i)
appliance_powers_dict[i] = column
appliance_powers = pd.DataFrame(appliance_powers_dict)
return appliance_powers
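# Illustrative usage sketch (not part of the original module; the dataset
# file names and building number are hypothetical):
#
#     from nilmtk import DataSet, HDFDataStore
#     data = DataSet('redd.h5')
#     elec = data.buildings[1].elec
#     co = CombinatorialOptimisation()
#     co.train(elec)
#     output = HDFDataStore('co_disag_output.h5', 'w')
#     co.disaggregate(elec.mains(), output)
#     output.close()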
| apache-2.0 |
juliusbierk/scikit-image | doc/examples/plot_regional_maxima.py | 18 | 3316 | """
=========================
Filtering regional maxima
=========================
Here, we use morphological reconstruction to create a background image, which
we can subtract from the original image to isolate bright features (regional
maxima).
First we try reconstruction by dilation starting at the edges of the image. We
initialize a seed image to the minimum intensity of the image, and set its
border to be the pixel values in the original image. These maximal pixels will
get dilated in order to reconstruct the background image.
"""
import numpy as np
from scipy.ndimage import gaussian_filter
import matplotlib.pyplot as plt
from skimage import data
from skimage import img_as_float
from skimage.morphology import reconstruction
# Convert to float: Important for subtraction later which won't work with uint8
image = img_as_float(data.coins())
image = gaussian_filter(image, 1)
seed = np.copy(image)
seed[1:-1, 1:-1] = image.min()
mask = image
dilated = reconstruction(seed, mask, method='dilation')
"""
Subtracting the dilated image leaves an image with just the coins and a flat,
black background, as shown below.
"""
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 2.5))
ax1.imshow(image)
ax1.set_title('original image')
ax1.axis('off')
ax2.imshow(dilated, vmin=image.min(), vmax=image.max())
ax2.set_title('dilated')
ax2.axis('off')
ax3.imshow(image - dilated)
ax3.set_title('image - dilated')
ax3.axis('off')
fig.tight_layout()
"""
.. image:: PLOT2RST.current_figure
Although the features (i.e. the coins) are clearly isolated, the coins
surrounded by a bright background in the original image are dimmer in the
subtracted image. We can attempt to correct this using a different seed image.
Instead of creating a seed image with maxima along the image border, we can use
the features of the image itself to seed the reconstruction process. Here, the
seed image is the original image minus a fixed value, ``h``.
"""
h = 0.4
seed = image - h
dilated = reconstruction(seed, mask, method='dilation')
hdome = image - dilated
"""
To get a feel for the reconstruction process, we plot the intensity of the
mask, seed, and dilated images along a slice of the image (indicated by red
line).
"""
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, figsize=(8, 2.5))
yslice = 197
ax1.plot(mask[yslice], '0.5', label='mask')
ax1.plot(seed[yslice], 'k', label='seed')
ax1.plot(dilated[yslice], 'r', label='dilated')
ax1.set_ylim(-0.2, 2)
ax1.set_title('image slice')
ax1.set_xticks([])
ax1.legend()
ax2.imshow(dilated, vmin=image.min(), vmax=image.max())
ax2.axhline(yslice, color='r', alpha=0.4)
ax2.set_title('dilated')
ax2.axis('off')
ax3.imshow(hdome)
ax3.axhline(yslice, color='r', alpha=0.4)
ax3.set_title('image - dilated')
ax3.axis('off')
fig.tight_layout()
plt.show()
"""
.. image:: PLOT2RST.current_figure
As you can see in the image slice, each coin is given a different baseline
intensity in the reconstructed image; this is because we used the local
intensity (shifted by ``h``) as a seed value. As a result, the coins in the
subtracted image have similar pixel intensities. The final result is known as
the h-dome of an image since this tends to isolate regional maxima of height
``h``. This operation is particularly useful when your images are unevenly
illuminated.
"""
| bsd-3-clause |
j4321/MyNotes | mynoteslib/constants.py | 1 | 28840 | #! /usr/bin/python3
# -*- coding:Utf-8 -*-
"""
MyNotes - Sticky notes/post-it
Copyright 2016-2019 Juliette Monsel <[email protected]>
MyNotes is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MyNotes is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
The scroll.png image is a modified version of the slider-vert.png assets from
the arc-theme https://github.com/horst3180/arc-theme
Copyright 2015 horst3180 (https://github.com/horst3180)
The png icons are modified versions of icons from the elementary project
(the xfce fork to be precise https://github.com/shimmerproject/elementary-xfce)
Copyright 2007-2013 elementary LLC.
The images IM_INFO_DATA, IM_ERROR_DATA, IM_QUESTION_DATA and IM_WARNING_DATA
were taken from "icons.tcl":
A set of stock icons for use in Tk dialogs. The icons used here
were provided by the Tango Desktop project which provides a
fied set of high quality icons licensed under the
Creative Commons Attribution Share-Alike license
(http://creativecommons.org/licenses/by-sa/3.0/)
See http://tango.freedesktop.org/Tango_Desktop_Project
Copyright (c) 2009 Pat Thoyts <[email protected]>
Constants and functions
"""
import os
import gettext
import warnings
from subprocess import Popen
from configparser import ConfigParser
from locale import getdefaultlocale, setlocale, LC_ALL
from subprocess import check_output, CalledProcessError
import ewmh
from PIL import Image, ImageDraw
EWMH = ewmh.EWMH()
SYMBOLS = 'ΓΔΘΛΞΠΣΦΨΩαβγδεζηθικλμνξοπρςστυφχψωϐϑϒϕϖ末»¡¿£¥$€§ø∞∀∃∄∈∉∫∧∨∩∪÷±√∝∼≃≅≡≤≥≪≫≲≳▪•✭✦➔➢✔▴▸✗✚✳☎✉✎♫⚠⇒⇔'
# --- paths
PATH = os.path.dirname(__file__)
if os.access(PATH, os.W_OK) and os.path.exists(os.path.join(PATH, "images")):
# the app is not installed
# local directory containing config files and sticky notes data
LOCAL_PATH = PATH
PATH_LOCALE = os.path.join(PATH, "locale")
PATH_IMAGES = os.path.join(PATH, "images")
PATH_DATA_BACKUP = os.path.join(LOCAL_PATH, "backup", "notes.backup%i")
PATH_DATA = os.path.join(LOCAL_PATH, "backup", "notes")
if not os.path.exists(os.path.join(LOCAL_PATH, "backup")):
os.mkdir(os.path.join(LOCAL_PATH, "backup"))
else:
# local directory containing config files and sticky notes data
LOCAL_PATH = os.path.join(os.path.expanduser("~"), ".mynotes")
if not os.path.isdir(LOCAL_PATH):
os.mkdir(LOCAL_PATH)
PATH_LOCALE = "/usr/share/locale"
PATH_IMAGES = "/usr/share/mynotes/images"
PATH_DATA_BACKUP = os.path.join(LOCAL_PATH, "notes.backup%i")
PATH_DATA = os.path.join(LOCAL_PATH, "notes")
PATH_CONFIG = os.path.join(LOCAL_PATH, "mynotes.ini")
PATH_LATEX = os.path.join(LOCAL_PATH, "latex")
PATH_LOCAL_DATA = os.path.join(LOCAL_PATH, "data")
PIDFILE = os.path.join(LOCAL_PATH, "mynotes.pid")
if not os.path.exists(PATH_LATEX):
os.mkdir(PATH_LATEX)
# --- images files
ICON_NAME = "mynotes-tray" # gtk / qt tray icon
IM_TKTRAY_ICON = os.path.join(PATH_IMAGES, "mynotes.png") # tk tray icon
IM_ICON_48 = os.path.join(PATH_IMAGES, "mynotes48.png") # for the tk dialogs
IM_CLOSE = os.path.join(PATH_IMAGES, "close.png")
IM_CLOSE_ACTIVE = os.path.join(PATH_IMAGES, "close_active.png")
IM_ROLL = os.path.join(PATH_IMAGES, "roll.png")
IM_ROLL_ACTIVE = os.path.join(PATH_IMAGES, "roll_active.png")
IM_LOCK = os.path.join(PATH_IMAGES, "verr.png")
IM_PLUS = os.path.join(PATH_IMAGES, "plus.png")
IM_MOINS = os.path.join(PATH_IMAGES, "moins.png")
IM_DELETE_16 = os.path.join(PATH_IMAGES, "delete_16.png")
IM_DELETE = os.path.join(PATH_IMAGES, "delete.png")
IM_CHANGE = os.path.join(PATH_IMAGES, "change.png")
IM_SELECT = os.path.join(PATH_IMAGES, "select.png")
IM_SORT_REV = os.path.join(PATH_IMAGES, "sort_rev.png")
IM_VISIBLE = os.path.join(PATH_IMAGES, "visible.png")
IM_HIDDEN = os.path.join(PATH_IMAGES, "hidden.png")
IM_VISIBLE_24 = os.path.join(PATH_IMAGES, "visible_24.png")
IM_HIDDEN_24 = os.path.join(PATH_IMAGES, "hidden_24.png")
IM_CLIP = os.path.join(PATH_IMAGES, "clip.png")
IM_SCROLL_ALPHA = os.path.join(PATH_IMAGES, "scroll.png")
IM_CHECKED = os.path.join(PATH_IMAGES, "checked.png")
IM_UNCHECKED = os.path.join(PATH_IMAGES, "unchecked.png")
IM_TRISTATE = os.path.join(PATH_IMAGES, "tristate.png")
# --- images data
IM_ERROR_DATA = """
iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAABiRJREFU
WIXFl11sHFcVgL97Z/bX693sbtd2ipOqCU7sQKukFYUigQgv/a+hoZGoqipvfQKpAsEDD0hIvCHE
j/pQ3sIDUdOiIqUyqXioEFSUhqit7cRJFJpEruxs1mt77Z3d2Z259/KwM5vZXTtOERJXOrozZ+6e
852fuXcW/s9D3O3Cs1Bow1Nx234BKQ9qpYpK6yFLSseScsVoveApdUrAzNOw9j8DOAMTtmX9RsM3
SqOjevcXDqUzu8dI5AvEc8O0axu4q6s4yzdZvnCxUSmXLWHMXzxjXpmGq/81wGmIZ6T8NXDi8w8d
id//+GPS8j1YWQXHgVYbfA/sGCRiMDQExTzKtvn3zDv6k9m5FsacXNT6+y+D95kAZqCEEO/cMzIy
9eBLLybjyodrN6DpDqw1/dfpFNw3TtuSfPz7P7irlZUL2pjHn4GVuwJ4G/JCiLl9U1OjB58/ZnP5
Mqxv3NGpMWZAz64cHNzHlTf/5N9YuHzTMeaLx6HW78+K3pwGKynEu/snJycOHPuWzdw81BuDUQZO
dfQ+MmvAuC1MdY3i178izUo15VZXj07DyTf6OGX0Jivlz0vFwgMTz3/bNnMXO0ZCo8b0iIk4C0WF
zsP1TRc1e4l9x56N5YuFwxkpf9afgW4J/gi7M1IuHH3lezm5uAQbmwOpjc79ujArA2uMgWwGMz7K
P377u/WW1pPTUB7IQFrKXx44NJWRbQ9d2+hGqbeRMEoTZEQFJdERfVgmvVFH+D57Jw9k4lL+YqAE
pyGnjZm+95knLHVjcVvHA6WIPgtLE+hVH4i6vsS9T3zTVsY8NwPZHoAUPFUs5JVQCt1q9zqORKm3
iLKrF6IjkfSHOiUlqu0hhCSXHdYePNYDEBPiu6MT+zOquo6JGNGhESkxUnYNmkCnLQtjWRgpMRG9
CtZ3JdD7axsU9+3N2EK8EALYQcNMpvfuQTcaXUMIAa+/Hi0Xgs9weASjefx4p5mFQDdbpD63G/HR
hakeAA2l+EgJU652iIMMyO2sRoYxBq1191oIgZQSITqooT0A7fnEirswUAp/LwG0MZlYIY9WqpPa
IHU7Da01Sqluo4UQSil830dr3emVsBeMIZbLoI0Z7gGQQtTbjoOOxW/XewcApVQ38jsBNs6fx6tW
O70Si+GWKwghNsM1NoCAW81KJTeUjKNbrR2N7uS4B7TRwJ+fR6TTxO4fxzUeAio9AMCl+tVrE0NH
DmM2nU4DAu6JE53UGoNfLuNdv45xnO4OF/ZKz+4X2T179I6D5To0NupouNgD4Btzqjx/8WjpS0cy
PU1Tr6MqFfylpc4bss1W26/rBwyfybECtcvXNrUxp3oAXJjZ2Kxb7cVP8P61gDGgWy2M624Z5d1E
3wNkDDKdwMQkjtuygbMhgAQ4DjUhxFvL/5z15X1jeLUaynW7p1u484WiuL3V9m/NoV6F50Ogjx3Y
Q/mDBV8a3piGzR4AAFfrHy4vlesmm0bks7edRQ6aAafcPoZVH2AUXOYzkI5TvbVa9+FHREYX4Bgs
I8RrV9/9oJF4eBKTjO8YvdoCJgqujcGkEqQemmDxb7OOFOLV6FHcAwBQ1/onTtOd/fTvH3rJRx/A
pBIDqd0q+p5sRaInnWDoywdZem+u7bbaH9W1/il9Y2Brfwt22TBfKOVHxr92JOacv4S/UuttuC06
PKoHsEs5hg7vZ/m9eW+zWltuwoNbfRNuebacgXsEnE2lkof2Hn04ZRouzQvXUU5z29cwFGs4TWpy
HJGK8+lfP256bnuuDU8+B9WtfG17uL0GsTF4VQrxYn60kBh55JDEbdG6uYq/7qDdFtpTELOQyQRW
Lk1sLI+MW9w6d8Wv3Vrz2nDyJPzgDDS287MVgAAywBCQ+Q5MTsOPs/BIMpVQ2bFCKlnMYg+nsYeS
eE6TVq1Be3WD9ZtrTc9tWetw7k341dtwBagDTmTeESAdAAxH5z0w9iQ8ehi+moWxBGRsiPvguVBf
h8qH8P6f4dxSp9PrdN73cN6k859R3U0J0nS+28JMpIM5FUgCiNP5X2ECox7gAk06KQ8ldLzZ7/xO
ANHnscBhCkgGjuOB3gb8CEAbaAWO3UA34DQ6/gPnmhBFs5mqXAAAAABJRU5ErkJggg==
"""
IM_QUESTION_DATA = """
iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAACG5JREFU
WIXFl3twVdUVxn97n3Nubm7euZcghEdeBBICEQUFIgVECqIo1uJMp3WodqyjMzpjZ7TTh20cK31N
/2jL2FYdKXaqRcbnDKGpoBFaAY1BHgHMgyRKQkJy87yv3Nyzd/84594k1RlppzPumTXn3Dl3r/Wd
b31rrbPhS17iSv+4bl2t2ZFhrRGI7QKxRkMAyHEfjwgYEOgjNnpfcXjiSENDbeL/AqBoW22uGE/7
MYL7yubN4MYVpVkrquaKqwJZ+LPTARgcjdIbHKOx+aI+9EH7WGvnZdA8q9PGf9b5eu3w/wygaPPO
h6Uhntxcsyj9/q+vtMrnBa6Is7ZPgzzzyvGJ/YfPRpWWj3fWff93/xWAonW1Xu3z/nVx6cxNTz74
1YzK4gIQjuN/nfyEEx9fIjgaYXAkhhAQyE3Hn5PBsvJZrF46l5I5+QB83NnP40+/FT7d1ltPOPrN
zoba2BcCWLy91hMOp72/bX1VxU/u3+BJ91i0fhrkuTcaaTzbjTQkhpQIIZBSIBApL1prtNYsryhk
xy1XUzonn1g8wVPPvh1/5dDpcz5f7LrmfbXxqfGM6eG1yCw+9uq2G6tW7nxoU5plGrzecJYnnnub
SwMhTNPAmmKmYWCaBoYpMQyJaRhIQ3IpGOKt4+1k+dKoLJ7BjStKjb6hcN7JloFrhlsO7oUnPh9A
8Rbvo6uuLrr3N4/ckm4Ykt/vPcqe/R9hGAamaWJZbnDL+W2axqRJA8NlxzAkAI3newhF4lxbMZs1
y4rNM+19c0PZ++NDLQff+0wKCu/Y6c/UVsubv/12/ryZubxUf5Ln3vgQ0zKnvK1kadkMlpQUUFEU
oCDPR25WOuPxBH2DYZpa+qg/3kEoGsdWCttWJGzF3ZuXcuf6Ci5eHmXrw7sHR4mXd7/2w+A0Bvyl
N+265/bl19+8eqE8c6GPn+85jGkYWC4Ay3Luf/3AV1g038+MXB8+rwfDkKR5TPKyvCyan8+qqtmc
au8nFrcdnQCn2vuoLptJSWEeE7bynDjdXTDUcvBNAAmweF1tpmXKu+65bYWh0Ty97zhSyGkUO0BM
hBAI4RAXTyjiCYWUEukKMz/Ly/b1C7EsE49lYlkmhjTYvf8jNHD3lmsM0zTuWryuNhPABIj4vFvW
Xl0s87PTOdXWS8snQTwec4ro3DSYBglbcfx8P+8199I7FMEQgg3L53N7TWkKXOV8Px7LJCFtXKx0
dA9zrnOAyqIAa68tkQePtm4BXpaO9vWOm65b4EPAkY+6HDEZTt4NN/dJML946QSv/fMCA6PjpHks
LI/F2a5BtNYpMUtJirGpLL7f3A3AxpXlPiHFjhQDaJZVlc0EoPWT4DQ1m8ZkKizTJDRuY1mmC04i
pWDNksJUD9Bac7E/jGUZrmuN1qCU5sKlIQAqSwrQWi+bBCDwF+RnAk5fl27wqeYAkZM9wLWaxVex
qnJmKritFO+e7sMyDdBOc1JKYxiSkdA4CMGM3Aw02j+VAfLcwTIWibuiEpNApJMSw208ydJcu3QW
axZPCW7bHGjspmcwimkYTmAlMWzHTyTmDMiczLRU/ctkNxgajboPvUghppuUGFJMY6O6OJ/ViwIo
pVBKYds2dR9e4uPuMbc7Tm9MUgqyM70AjITHUy1IAghNsH8oDEAgz4cQOIqWjkkpEC4rSYfXL/Sn
giulONYyRFd/1GXKAZxkUrgvkp/tAAgORxAQnAQg5InmC5cBWDgv4NS5EAhAINzyIlVmUgiy040U
9Uop2voiKYakEAiRvDp7EYKS2XkAnOvsR0h5IqUBrfWeQ8fb1t2xvtJXs3QuB462TfZokbxMGZxC
8If6DtI8Fh6PhcdjojSpBuXin7Kc3csXzQLgrWOtEWWrPSkAvkis7kjTBTU8FqOypIAF8/x09Y6Q
FGjyTdHJstLsWDsnNZIBXj7Wj1LKYSS5B412nRTNymHBnHxGQ+O8836r8kVidakUNDfUhhIJtfcv
dU22AO69dRlCCNeZU8fJe6U0ylZYBlgGmNKx+ESCiYRNwlYoWzn/UxqtHOB3ra8AAX/7x0nbttXe
5oba0GQVAPGE9dju1z4Y7u4fY9F8P9/YWOUEV06O7eTVnXBTBaiUIj4xwcSETSJhk7BtbNtOPdta
U0ZpYS59wRB/2ndsOBa3HkvGTU3D0fb6aE7ZBt3RM1yzuabcqiwKEI5N0N495ChaSKcihJPRa0pz
sbUmYTugPmgbJmErB4DLxETC5oYlhWxdXUrCVvxgV32krav/qa4Djx76D4kllxalt/7q9e2bqjf9
9Lsb0oQQHGrsYO+hc0gp3emW/Bhxm5NbZlqD0g79CTcFt60u4YYlhWhg5/MN4y/WNdW3vfnoNhD6
Mww46wlmV9/w6snzA1sHRqKBVUvnGQvm+qkuKyA4GqVvKOJAdrcn8zz14yNh2ywozOVbGyuoKg4w
PmHzyxcOx1+sazqTlhbZ3H92vT29Pj5nzVn1SLqVH3ipunzOxqceutlX6n7lXrw8yqn2flq7hxgL
TzAWiyOFICfTS44vjbLCXKqK/cwOOHOl49IwP9r192hT84V3e4+9cF90sC0IRL8QAOADsgvXfu9B
b3bgkTs3LPN+52srzPlX5V7RUerTy6M8/0Zj4uUDH45Hg13PdB/9425gzLUhQH0RgDQgC8hKLyid
7a/c9oCV4d9WVTpLbF5TmX5tRaGYkecjJ8MLAkZD4wyMRGg636PrDjfHzrT26NhYT33w1Kt/Hh/u
6XUDh4BBIHwlDIBTohlANpBhWb6s7PKNK30FCzZa6dnVYORoIX2OExVF26Px8NCZSN/5d0bb3mlK
JGIhHLpDwLAL4jPnxSs9nBqABXhddrw4XdRygSrABuKuxYBx9/6KDqlf2vo3PYe56vmkuwMAAAAA
SUVORK5CYII=
"""
IM_INFO_DATA = """
iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABmJLR0QA/wD/AP+gvaeTAAAACXBI
WXMAAAsTAAALEwEAmpwYAAAAB3RJTUUH1gUdFDM4pWaDogAABwNJREFUWMPFlltsVNcVhv+199ln
bh7PjAdfMGNDcA04EKMkJlIsBVJVbRqlEVUrqyW0QAtFTVWpjVpFfamUF6K+tCTKQyXn0jaiShOr
bRqRoHJpEEoIEBucENuk2OViPB5f5j5zrvuc3YcMFQ8FPBFVj7S0paN91v+tf1/OAv7PD9UzeeCp
p0KRCrYyHtymoPrgySYAANdyBBr2Peu1agP+NrR/v3nHAb6/52d7wfivWlet11NdvZG21laEwzo0
RvA9F4uLi7h08bxxaWLUVp78xSsv/XrwjgAMDDyjRxPWUGOy5Uu9/VsjEA3I5KvIVQ240gHIh9CA
5YkwelIJRATw94NvGpnpK0fL+eDA0NAzzq3ya7cDjCbsoWWr1j+y4f4vB/41Z8JTeaxqE7hndSNi
EeELzn3LkapQdfzJTE5JV/GBb28LHz327lcnzp4ZAvB1AOpmAvyWtv/g6R9GW1c+uf6Bx0Kfzpjo
TmnYtDaKtkTAj4aEFBqTnJPUOfciIeG3N4XVQtmyzl/JuY8/fH9wOjO/smvVmuy5s+8P1w2wa9dP
46SLN3sf2ha7uiixaU0Qna06NA6PMXIZQRJBMiIXRBKABygv3hBQV+bK1dmcoR7d3Bc5c/pk/8YN
fYOjo6es/6bDbgbAdLa9uXNj2PYF2pOEloQGAiRIuUTkME42J7IZweYES+NkckZWWNfseEPAKJtO
oWxLu69/c5jpbPtNdW7qPwvsbO1cF8pVLKxs0+HD94gpl0AOQTlEsDkjizFmMk4WESyNM4NzMgOC
VYI6q17OlIp9992ngek769+EvtfVEI3jWqaKgAgAIAlFLuOwGZHDiTnElGQgF4DvM1LKV7Bdz2NE
xaCuhQpVm1Y0p5qhvNV1AyjlRTWhwVM2TMdzgkJzieAQyGGMbMZgfwZBEiBPA3xX+VSouAvBAFeM
yDddD7rgpHw/WjcAMa0EZScZk5heqFrxiO4BzCGCzYgsBrI4I5sYcxlBKl/5WdOdd6S0gxoLEZEi
Iq4AnzGq1r0HiPhYuZRFU1R3FgqWkS1aZQA2gWzOyGQcJudkaAwVR3qz8yXzvCXlzJoViaagrlWC
jJnLm8Jarli2GNMm6wbwPPO31y6Ollc2N3pcI+fyYjW/8a5EKqQTz5WtdLHsTi1W7Im5vDlcMdxx
wVk2Ys9/pTI3+WhAaIauM+MLbYnlH46MVKVyX6v7Hhg9e2ps3doN32ld0Rlrb1nmmK4stCdCSCUj
Le1NwW6uXJ08m/t2OarBXh0ie0syHu0plKtTFGw8n4o33q1z1XngD7+X3C/uHBkZces7hoAi1946
fPSvtpDlYFdLPDI8mR03HC87frXwFpgqLYuFuzrbkg8m49EeDsqDa+cizXcNpppia5ui+sYXnn+O
29LbOTg4aHzun9GOPT/pDemhf3xzx25DicjkiqaAIs4zhumMRUJaPhzgJZ0LQ5C7gXjQL1kS0YD+
o337nhWlYvHJV178zZ9vlZ/dDuDVl57/2HWt755894hINoYSmZx11TYKCUZKCs4cnQuDmGtfvDiR
dD3n04aA6J4YHzeLhfLg7cSXBAAA5NPpufS1WFjwkFSelZ6ZLWfn0kliTDJdue8dO9qenp2d1DVR
4cTarlyZJgV5dim5lwTw8sv7c1L6H89cm6FlDcHVhlOJffThsa9d+ud72y5+cnTn2PjJJ1avjOoE
SnBiPadOfRDTGT5YSm5tqR2R7Zp7//L6gRPf27NjVaolqS9MCzh28W6mgDXdKxCNRb/oOlV18O3D
1xzXGXpx8LnZO94Tbt/x+MFYouexh7dsQU/PWjRGI+BcAyMgm1vAO28fxvj4xOX5jL7u0KEX7Dvq
AAC0Nucf2rLZhq8Y3njjT8gulOBKDw0NAQjNQT435eQWL3iHDk3YS81ZF0B6psI/GbuAXbu+gQf7
H4ArPeQWC5jLZKCUhQvjWb2QD3bVk5PVM9nz5LML8waOH38fekBHIhFDqqMFXd0pnDhxGmMTU3Bd
9/X/GQDntO/eezswMPBjaFwAABxH4sKFq+jt7cX6ni6EQuJbdeWsZ3J3d/PTmqaEYUyhXDZBTEOh
WIIQwOi5jzA1eRnZXPFSPO7/bmbGlLfqhus5BVotRH9/x7rGxtBeIQJPACrMOYNSPpRiUIpnlTIO
nzmT+eX8fLH8WZMKF4Csje7ncUAHEKhFcHq6ZE5OZoc7O3tlc3N33+7dP9c2bXoE09NlO52uHDhy
ZOTVatUWte+otsTXg2pQSwagG6r/jwsAQul0erqjo+OesbGx1tHRUT+fz48dP378j57neQD8mtB1
B1TtnV9zo64loJqoXhtFDUQHEGhvb2/2fZ9nMpliTcAFYNdC1sIBYN1sCeq5Ca9bqtWcu9Fe3FDl
9Uqvu3HLjfhvTUo85WzjhogAAAAASUVORK5CYII=
"""
IM_WARNING_DATA = """
iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAABHNCSVQICAgIfAhkiAAABSZJREFU
WIXll1toVEcYgL+Zc87u2Yu7MYmrWRuTJuvdiMuqiJd4yYKXgMQKVkSjFR80kFIVJfWCWlvpg4h9
8sXGWGof8iKNICYSo6JgkCBEJRG8ImYThNrNxmaTeM7pQ5IlJkabi0/9YZhhZv7///4z/8zPgf+7
KCNRLgdlJijXwRyuDTlcxV9hbzv8nQmxMjg+XDtiOEplkG9PSfkztGmTgmFQd+FCVzwa3fYN/PHZ
AcpBaReicW5xcbb64IEQqko8Lc26d/58cxS+/BY6hmJvyEfQBoUpwWCmW1FErKaGWHU13uRk4QkE
UtxQNFR7QwIoB4eiKD9PWbVKbb10CZmaCqmpxCormRYO26QQx85B0mcD+AeK0xYvHqu1tNDx+DH6
gQM4jh0j3tCA3tGBLyfHLuD7zwJwAcYqun44sHy51nr5MsqsWWj5+djCYdS5c4ldvUr24sU2qarf
lUL6qAN0wqH0vDy7+fAhXZEI+v79CNmt7igpofPVK5SmJvyhkJBwYlQBSiHd7vUWZ86bp8WqqtCW
LkVbuBAhBEIItGAQ2+rVxG7cICMY1KTDsekc5IwagIQTmStXis47dzBiMfR9+xCi+wb39s79+zFi
MczGRjLmzTMlnBoVgLMwyzF+/Cb/lClq2/Xr2AoKUKdPxzAMWltbiUajmKaJkpGBY8sW3tbW4g8E
VNXrXVEKK0YMoMKp7Px8K15Tg2VZOHbvBiASiRAMBgkGg0QiEYQQOIuLsRSFrnv3yJo/HxVOW594
7D4KUAa57qysvNSUFOVtbS32rVuRfj9CCFwuV2Kfy+VCCIFMScFVVET7/fukJidLm883rQy+HhaA
BUII8cvUNWt4W1WFcLvRd+5MnHl/AOjOB+eOHchx44jX1ZEdCqkSTpaDbcgA5+GrpNmzc9ymKdvr
67Hv2oVMSko4cjgcKIqCoijoup64EdLpxLV3Lx1PnuCVUrgmTfK9hV1DAjgKqlSUk1PCYdl25QrS
70cvLEw4SWS+04nT6XxvXgiBc8MGtKlTaa+rIysnR1Ok/OF38PxngAzY4VuwYKL99WvR8fQpjj17
kLqeiL6393g8eDyeAWBSVfEcOkRXczOOaBRvVpZuDPJEDwD4DVyKrv+UlZurxSorUWfMQC8oGOBc
CDHgC/Rdc4TD2BctIl5fT+bkyTahaXvOw8RPApiwd2Ju7hjZ2EhXSwvOkhKQcoADgIqKCioqKgYc
QW9LOnIEIxZDbWpiXCCABT9+FKAUxtm83pKMUEiLVVejLVqEtmTJB50LIdi2bRuFPbnRd7232efM
wbVuHR2PHjHR77dJXS8sg5mDAihweFJenmrevYvR1oazpGTQ6IQQaJqG7ClI/dd655IOHsSyLMSL
F6QFAib9nugEQClk2Xy+orTsbK3t1i3sa9ei5eQMGr0QgvLyci5evDiocyEEtsxMPNu30/nsGRO8
XlVzu8NlkNvrV+0T/fHMZcusrtu3MeNx9PXrobUVq8cYQrw3TrRub1h9+v573Bs3Ej1zBvP5c/zp
6dbLhoaTwPy+ANKCfF92thq7dg2A6JYt/fNlxGK8eUNSerryHEJHQT8K8V4A5ztojty8OeaLzZul
1DSwLCzDANPEMozusWFgmWZ33288YK3/nGlixuM0v3xpWfDX0Z4i1VupXEWwIgRnJfhGPfQ+YsLr
+7DzNFwCuvqWyiRg7DSYoIBu9smPkYqEd4AwIN4ITUAL0A4Da7UC6ICdEfy2fUBMoAvo7GnWKNoe
mfwLcAuinuFNL7QAAAAASUVORK5CYII=
"""
ICONS = {"information": IM_INFO_DATA, "error": IM_ERROR_DATA,
"question": IM_QUESTION_DATA, "warning": IM_WARNING_DATA}
# --- config file
AUTOCORRECT = {'->': '→', '<-': '←', '<->': '↔', '=>': '⇒', '<=': '⇐',
'<=>': '⇔', '=<': '≤', '>=': '≥', ":)": '☺'}
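# Editorial note: in the config file these shortcuts are flattened into
# tab-separated "shortcut replacement" pairs (e.g. "-> →\t=> ⇒") by the
# "autocorrect" option handling below, and parsed back into this dict on load.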
CONFIG = ConfigParser()
if os.path.exists(PATH_CONFIG):
CONFIG.read(PATH_CONFIG)
LANGUE = CONFIG.get("General", "language")
if not CONFIG.has_option("General", "position"):
CONFIG.set("General", "position", "normal")
if not CONFIG.has_option("General", "check_update"):
CONFIG.set("General", "check_update", "True")
if not CONFIG.has_option("General", "buttons_position"):
CONFIG.set("General", "buttons_position", "right")
if not CONFIG.has_option("General", "symbols"):
CONFIG.set("General", "symbols", SYMBOLS)
if not CONFIG.has_option("General", "trayicon"):
CONFIG.set("General", "trayicon", "")
if not CONFIG.has_option("Font", "mono"):
CONFIG.set("Font", "mono", "")
if not CONFIG.has_option("General", "autocorrect"):
value = "\t".join(["%s %s" % (key, val) for key, val in AUTOCORRECT.items()])
CONFIG.set("General", "autocorrect", value)
else:
AUTOCORRECT = {}
for ch in CONFIG.get("General", "autocorrect").split('\t'):
key, val = ch.split(' ')
AUTOCORRECT[key] = val
else:
LANGUE = ""
CONFIG.add_section("General")
CONFIG.set("General", "language", "en")
CONFIG.set("General", "opacity", "82")
CONFIG.set("General", "position", "normal")
CONFIG.set("General", "buttons_position", "right")
CONFIG.set("General", "date_in_title", "True")
CONFIG.set("General", "check_update", "True")
CONFIG.set("General", "symbols", SYMBOLS)
CONFIG.set("General", "trayicon", "")
value = "\t".join(["%s %s" % (key, val) for key, val in AUTOCORRECT.items()])
CONFIG.set("General", "autocorrect", value)
CONFIG.set("General", "splash_supported", str(os.environ.get('DESKTOP_SESSION') != 'plasma'))
CONFIG.add_section("Font")
CONFIG.set("Font", "text_family", "TkDefaultFont")
CONFIG.set("Font", "text_size", "12")
CONFIG.set("Font", "title_family", "TkDefaultFont")
CONFIG.set("Font", "title_size", "14")
CONFIG.set("Font", "title_style", "bold")
CONFIG.set("Font", "mono", "")
CONFIG.add_section("Categories")
# --- system tray icon
def get_available_gui_toolkits():
"""Check which gui toolkits are available to create a system tray icon."""
toolkits = {'gtk': True, 'qt': True, 'tk': True}
b = False
try:
import gi
b = True
except ImportError:
toolkits['gtk'] = False
try:
import PyQt5
b = True
except ImportError:
try:
import PyQt4
b = True
except ImportError:
try:
import PySide
b = True
except ImportError:
toolkits['qt'] = False
tcl_packages = check_output(["tclsh",
os.path.join(PATH, "packages.tcl")]).decode().strip().split()
toolkits['tk'] = "tktray" in tcl_packages
b = b or toolkits['tk']
if not b:
raise ImportError("No GUI toolkits available to create the system tray icon.")
return toolkits
TOOLKITS = get_available_gui_toolkits()
GUI = CONFIG.get("General", "trayicon").lower()
if not TOOLKITS.get(GUI):
DESKTOP = os.environ.get('XDG_CURRENT_DESKTOP')
if DESKTOP == 'KDE':
if TOOLKITS['qt']:
GUI = 'qt'
else:
warnings.warn("No version of PyQt was found, falling back to another GUI toolkits so the system tray icon might not behave properly in KDE.")
GUI = 'gtk' if TOOLKITS['gtk'] else 'tk'
else:
if TOOLKITS['gtk']:
GUI = 'gtk'
elif TOOLKITS['qt']:
GUI = 'qt'
else:
GUI = 'tk'
CONFIG.set("General", "trayicon", GUI)
if GUI == 'tk':
ICON = IM_TKTRAY_ICON
else:
ICON = ICON_NAME
# --- language
setlocale(LC_ALL, '')
APP_NAME = "MyNotes"
LANGUAGES = {"fr": "Français", "en": "English", "nl": "Nederlands", "de": "Deutsch", "ca": "Català", "es": "Español"}
REV_LANGUAGES = {val: key for key, val in LANGUAGES.items()}
if LANGUE not in LANGUAGES:
# Check the default locale
LANGUE = getdefaultlocale()[0].split('_')[0]
if LANGUE in LANGUAGES:
CONFIG.set("General", "language", LANGUE)
else:
CONFIG.set("General", "language", "en")
gettext.find(APP_NAME, PATH_LOCALE)
gettext.bind_textdomain_codeset(APP_NAME, "UTF-8")
gettext.bindtextdomain(APP_NAME, PATH_LOCALE)
gettext.textdomain(APP_NAME)
LANG = gettext.translation(APP_NAME, PATH_LOCALE,
languages=[LANGUE], fallback=True)
LANG.install()
gettext.gettext = LANG.gettext
# --- default categories
if not CONFIG.has_option("General", "default_category"):
CONFIG.set("General", "default_category", _("home"))
CONFIG.set("Categories", _("home"), '#F9F3A9')
CONFIG.set("Categories", _("office"), '#A7B6D6')
# --- colors
COLORS = {_("Blue"): '#A7B6D6', _("Turquoise"): "#9FC9E2",
_("Orange"): "#E1C59A", _("Red"): "#CD9293",
_("Grey"): "#CECECE", _("White"): "#FFFFFF",
_("Green"): '#C6FFB4', _("Black"): "#7D7A7A",
_("Purple"): "#B592CD", _("Yellow"): '#F9F3A9',
_("Dark Blue"): "#4D527D"}
INV_COLORS = {col: name for name, col in COLORS.items()}
TEXT_COLORS = {_("Black"): "black", _("White"): "white",
_("Blue"): "blue", _("Green"): "green",
_("Red"): "red", _("Yellow"): "yellow",
_("Cyan"): "cyan", _("Magenta"): "magenta",
_("Grey"): "grey", _("Orange"): "orange"}
def active_color(color, output='HTML'):
"""Return a lighter shade of color (RGB triplet with value max 255) in HTML format."""
r, g, b = color
r *= 0.7
g *= 0.7
b *= 0.7
if output == 'HTML':
return ("#%2.2x%2.2x%2.2x" % (round(r), round(g), round(b))).upper()
else:
return (round(r), round(g), round(b))
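# Illustrative values (not from the original source): with the 0.7 factor above,
# active_color((255, 0, 0)) returns '#B20000', while any other `output` value,
# e.g. active_color((255, 0, 0), output='RGB'), returns the tuple (178, 0, 0).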
def color_box(color):
im = Image.new('RGBA', (18, 16), (0, 0, 0, 0))
draw = ImageDraw.Draw(im)
draw.rectangle([3, 3, 13, 13], color, 'black')
return im
# --- latex (optional): insertion of latex formulas via matplotlib
try:
from matplotlib import rc
rc('text', usetex=True)
from matplotlib.mathtext import MathTextParser
from matplotlib.image import imsave
parser = MathTextParser('bitmap')
LATEX = True
except Exception:
LATEX = False
def math_to_image(latex, image_path, **options):
img = parser.to_rgba(latex, **options)[0]
imsave(image_path, img)
# --- filebrowser
ZENITY = False
paths = os.environ['PATH'].split(":")
for path in paths:
if os.path.exists(os.path.join(path, "zenity")):
ZENITY = True
try:
import tkfilebrowser as tkfb
except ImportError:
tkfb = False
from tkinter import filedialog
def askopenfilename(defaultextension, filetypes, initialdir, initialfile="",
title=_('Open'), **options):
"""
Open filebrowser dialog to select file to open.
Arguments:
- defaultextension: extension added if none is given
- initialdir: directory where the filebrowser is opened
- initialfile: initially selected file
- filetypes: [('NAME', '*.ext'), ...]
"""
if tkfb:
return tkfb.askopenfilename(title=title,
defaultext=defaultextension,
filetypes=filetypes,
initialdir=initialdir,
initialfile=initialfile,
**options)
elif ZENITY:
try:
args = ["zenity", "--file-selection",
"--filename", os.path.join(initialdir, initialfile)]
for ext in filetypes:
args += ["--file-filter", "%s|%s" % ext]
args += ["--title", title]
file = check_output(args).decode("utf-8").strip()
filename, ext = os.path.splitext(file)
if not ext:
ext = defaultextension
return filename + ext
except CalledProcessError:
return ""
except Exception:
return filedialog.askopenfilename(title=title,
defaultextension=defaultextension,
filetypes=filetypes,
initialdir=initialdir,
initialfile=initialfile,
**options)
else:
return filedialog.askopenfilename(title=title,
defaultextension=defaultextension,
filetypes=filetypes,
initialdir=initialdir,
initialfile=initialfile,
**options)
def asksaveasfilename(defaultextension, filetypes, initialdir=".", initialfile="",
title=_('Save As'), **options):
"""
Open filebrowser dialog to select file to save to.
Arguments:
- defaultextension: extension added if none is given
- initialdir: directory where the filebrowser is opened
- initialfile: initially selected file
- filetypes: [('NAME', '*.ext'), ...]
"""
if tkfb:
return tkfb.asksaveasfilename(title=title,
defaultext=defaultextension,
filetypes=filetypes,
initialdir=initialdir,
initialfile=initialfile,
**options)
elif ZENITY:
try:
args = ["zenity", "--file-selection",
"--filename", os.path.join(initialdir, initialfile),
"--save", "--confirm-overwrite"]
for ext in filetypes:
args += ["--file-filter", "%s|%s" % ext]
args += ["--title", title]
file = check_output(args).decode("utf-8").strip()
if file:
filename, ext = os.path.splitext(file)
if not ext:
ext = defaultextension
return filename + ext
else:
return ""
except CalledProcessError:
return ""
except Exception:
return filedialog.asksaveasfilename(title=title,
defaultextension=defaultextension,
initialdir=initialdir,
filetypes=filetypes,
initialfile=initialfile,
**options)
else:
return filedialog.asksaveasfilename(title=title,
defaultextension=defaultextension,
initialdir=initialdir,
filetypes=filetypes,
initialfile=initialfile,
**options)
# --- compatibility
def add_trace(variable, mode, callback):
"""
Add trace to variable.
Ensure compatibility with old and new trace method.
mode: "read", "write", "unset" (new syntax)
"""
try:
return variable.trace_add(mode, callback)
except AttributeError:
# fallback to old method
return variable.trace(mode[0], callback)
# --- miscellaneous functions
def open_url(url):
Popen(['xdg-open', url])
def sorting(index):
"""Sorting key for text indexes."""
line, char = index.split(".")
return (int(line), int(char))
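# For reference: Tk text indexes are "line.column" strings, so a plain string
# sort would put "10.1" before "9.2"; with this key, sorting("9.2") == (9, 2)
# and sorting("10.1") == (10, 1), giving the expected numeric ordering.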
def save_config():
"""Save configuration to file."""
with open(PATH_CONFIG, 'w') as fichier:
CONFIG.write(fichier)
def backup(nb_backup=12):
"""Backup current note data."""
backups = [int(f.split(".")[-1][6:])
for f in os.listdir(os.path.dirname(PATH_DATA_BACKUP))
if f[:12] == "notes.backup"]
if len(backups) < nb_backup:
os.rename(PATH_DATA, PATH_DATA_BACKUP % len(backups))
else:
os.remove(PATH_DATA_BACKUP % 0)
for i in range(1, len(backups)):
os.rename(PATH_DATA_BACKUP % i, PATH_DATA_BACKUP % (i - 1))
os.rename(PATH_DATA, PATH_DATA_BACKUP % (nb_backup - 1))
def optionmenu_patch(om, var):
"""Variable bug patch + bind menu so that it disapear easily."""
menu = om['menu']
last = menu.index("end")
for i in range(0, last + 1):
menu.entryconfig(i, variable=var)
menu.bind("<FocusOut>", menu.unpost())
def text_ranges(widget, tag, index1="1.0", index2="end"):
"""
Equivalent of Text.tag_ranges but with an index restriction.
Arguments:
- widget: Text widget
- tag: tag to find
- index1: start search at this index
- index2: end search at this index
"""
r = [i.string for i in widget.tag_ranges(tag)]
i1 = widget.index(index1)
i2 = widget.index(index2)
deb = r[::2]
fin = r[1::2]
i = 0
while i < len(deb) and sorting(deb[i]) < sorting(i1):
i += 1
j = len(fin) - 1
while j >= 0 and sorting(fin[j]) > sorting(i2):
j -= 1
tag_ranges = r[2 * i:2 * j + 2]
if i > 0 and sorting(fin[i - 1]) > sorting(i1):
if i - 1 <= j:
tag_ranges.insert(0, fin[i - 1])
tag_ranges.insert(0, i1)
else:
tag_ranges.insert(0, i2)
tag_ranges.insert(0, i1)
return tag_ranges
if j < len(fin) - 1 and sorting(deb[j + 1]) < sorting(i2):
tag_ranges.append(deb[j + 1])
tag_ranges.append(i2)
return tag_ranges
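# Hypothetical example (tag and indexes made up, not from the original code):
# if a Text widget carries the tag "bold" on 1.0-1.4 and 2.0-2.6, then
# text_ranges(widget, "bold", "1.2", "2.3") returns ["1.2", "1.4", "2.0", "2.3"],
# i.e. the stored ranges clipped to the requested index window.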
| gpl-3.0 |
jmuhlich/pysb | pysb/examples/paper_figures/fig6.py | 5 | 9204 | """Fit the EARM model to experimental data and plot the fits (Figure 6 from the PySB publication)"""
from __future__ import print_function
import pysb.integrate
import pysb.util
import numpy as np
import scipy.optimize
import scipy.interpolate
import matplotlib.pyplot as plt
import os
import sys
import inspect
from earm.lopez_embedded import model
# List of model observables and corresponding data file columns for
# point-by-point fitting
obs_names = ['mBid', 'cPARP']
data_names = ['norm_ICRP', 'norm_ECRP']
var_names = ['nrm_var_ICRP', 'nrm_var_ECRP']
# Load experimental data file
data_path = os.path.join(os.path.dirname(__file__), 'fig6_data.csv')
exp_data = np.genfromtxt(data_path, delimiter=',', names=True)
# Model observable corresponding to the IMS-RP reporter (MOMP timing)
momp_obs = 'aSmac'
# Mean and variance of Td (delay time) and Ts (switching time) of MOMP, and
# yfinal (the last value of the IMS-RP trajectory)
momp_data = np.array([9810.0, 180.0, 1.0])
momp_var = np.array([7245000.0, 3600.0, 1e-9])
# Build time points for the integrator, using the same time scale as the
# experimental data but with greater resolution to help the integrator converge.
ntimes = len(exp_data['Time'])
# Factor by which to increase time resolution
tmul = 10
# Do the sampling such that the original experimental timepoints can be
# extracted with a slice expression instead of requiring interpolation.
tspan = np.linspace(exp_data['Time'][0], exp_data['Time'][-1],
(ntimes-1) * tmul + 1)
# Initialize solver object
solver = pysb.integrate.Solver(model, tspan, rtol=1e-5, atol=1e-5)
# Get parameters for rates only
rate_params = model.parameters_rules()
# Build a boolean mask for those params against the entire param list
rate_mask = np.array([p in rate_params for p in model.parameters])
# Build vector of nominal parameter values from the model
nominal_values = np.array([p.value for p in model.parameters])
# Set the radius of a hypercube bounding the search space
bounds_radius = 2
def objective_func(x, rate_mask, lb, ub):
caller_frame, _, _, caller_func, _, _ = inspect.stack()[1]
if caller_func in {'anneal', '_minimize_anneal'}:
caller_locals = caller_frame.f_locals
if caller_locals['n'] == 1:
print(caller_locals['best_state'].cost, caller_locals['current_state'].cost)
# Apply hard bounds
if np.any((x < lb) | (x > ub)):
print("bounds-check failed")
return np.inf
# Simulate model with rates taken from x (which is log transformed)
param_values = np.array([p.value for p in model.parameters])
param_values[rate_mask] = 10 ** x
solver.run(param_values)
# Calculate error for point-by-point trajectory comparisons
e1 = 0
for obs_name, data_name, var_name in zip(obs_names, data_names, var_names):
# Get model observable trajectory (this is the slice expression
# mentioned above in the comment for tspan)
ysim = solver.yobs[obs_name][::tmul]
# Normalize it to 0-1
ysim_norm = ysim / np.nanmax(ysim)
# Get experimental measurement and variance
ydata = exp_data[data_name]
yvar = exp_data[var_name]
# Compute error between simulation and experiment (chi-squared)
e1 += np.sum((ydata - ysim_norm) ** 2 / (2 * yvar)) / len(ydata)
# Calculate error for Td, Ts, and final value for IMS-RP reporter
# =====
# Normalize trajectory
ysim_momp = solver.yobs[momp_obs]
ysim_momp_norm = ysim_momp / np.nanmax(ysim_momp)
# Build a spline to interpolate it
st, sc, sk = scipy.interpolate.splrep(solver.tspan, ysim_momp_norm)
# Use root-finding to find the point where trajectory reaches 10% and 90%
t10 = scipy.interpolate.sproot((st, sc-0.10, sk))[0]
t90 = scipy.interpolate.sproot((st, sc-0.90, sk))[0]
# Calculate Td as the mean of these times
td = (t10 + t90) / 2
# Calculate Ts as their difference
ts = t90 - t10
# Get yfinal, the last element from the trajectory
yfinal = ysim_momp_norm[-1]
# Build a vector of the 3 variables to fit
momp_sim = [td, ts, yfinal]
# Perform chi-squared calculation against mean and variance vectors
e2 = np.sum((momp_data - momp_sim) ** 2 / (2 * momp_var)) / 3
# Calculate error for final cPARP value (ensure all PARP is cleaved)
cparp_final = model.parameters['PARP_0'].value
cparp_final_var = .01
cparp_final_sim = solver.yobs['cPARP'][-1]
e3 = (cparp_final - cparp_final_sim) ** 2 / (2 * cparp_final_var)
error = e1 + e2 + e3
return error
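# Editorial summary of the objective above: the returned error is the sum of
# three chi-squared style terms -- the point-by-point trajectory mismatch (e1),
# the MOMP timing features Td/Ts/yfinal (e2), and the final cPARP value (e3) --
# each weighted by the corresponding measurement variance.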
def estimate(start_values=None):
"""Estimate parameter values by fitting to data.
Parameters
==========
parameter_values : numpy array of floats, optional
Starting parameter values. Taken from model's nominal parameter values
if not specified.
Returns
=======
numpy array of floats, containing fitted parameter values.
"""
# Set starting position to nominal parameter values if not specified
if start_values is None:
start_values = nominal_values
else:
assert start_values.shape == nominal_values.shape
# Log-transform the starting position
x0 = np.log10(start_values[rate_mask])
# Displacement size for annealing moves
dx = .02
# The default 'fast' annealing schedule uses the 'lower' and 'upper'
# arguments in a somewhat counterintuitive way. See
# http://projects.scipy.org/scipy/ticket/1126 for more information. This is
# how to get the search to start at x0 and use a displacement on the order
# of dx (note that this will affect the T0 estimation which *does* expect
# lower and upper to be the absolute expected bounds on x).
lower = x0 - dx / 2
upper = x0 + dx / 2
# Log-transform the rate parameter values
xnominal = np.log10(nominal_values[rate_mask])
# Hard lower and upper bounds on x
lb = xnominal - bounds_radius
ub = xnominal + bounds_radius
# Perform the annealing
args = [rate_mask, lb, ub]
(xmin, Jmin, Tfinal, feval, iters, accept, retval) = \
scipy.optimize.anneal(objective_func, x0, full_output=True,
maxiter=4000, quench=0.5,
lower=lower, upper=upper,
args=args)
# Construct vector with resulting parameter values (un-log-transformed)
params_estimated = start_values.copy()
params_estimated[rate_mask] = 10 ** xmin
# Display annealing results
for v in ('xmin', 'Jmin', 'Tfinal', 'feval', 'iters', 'accept', 'retval'):
print("%s: %s" % (v, locals()[v]))
return params_estimated
def display(params_estimated):
# Simulate model with nominal parameters and construct a matrix of the
# trajectories of the observables of interest, normalized to 0-1.
solver.run()
obs_names_disp = ['mBid', 'aSmac', 'cPARP']
obs_totals = [model.parameters[n].value for n in ('Bid_0', 'Smac_0', 'PARP_0')]
sim_obs = solver.yobs[obs_names_disp].view(float).reshape(len(solver.yobs), -1)
sim_obs_norm = (sim_obs / obs_totals).T
# Do the same with the estimated parameters
solver.run(params_estimated)
sim_est_obs = solver.yobs[obs_names_disp].view(float).reshape(len(solver.yobs), -1)
sim_est_obs_norm = (sim_est_obs / obs_totals).T
# Plot data with simulation trajectories both before and after fitting
color_data = '#C0C0C0'
color_orig = '#FAAA6A'
color_est = '#83C98E'
plt.subplot(311)
plt.errorbar(exp_data['Time'], exp_data['norm_ICRP'],
yerr=exp_data['nrm_var_ICRP']**0.5, c=color_data, linewidth=2,
elinewidth=0.5)
plt.plot(solver.tspan, sim_obs_norm[0], color_orig, linewidth=2)
plt.plot(solver.tspan, sim_est_obs_norm[0], color_est, linewidth=2)
plt.ylabel('Fraction of\ncleaved IC-RP/Bid', multialignment='center')
plt.axis([0, 20000, -0.2, 1.2])
plt.subplot(312)
plt.vlines(momp_data[0], -0.2, 1.2, color=color_data, linewidth=2)
plt.plot(solver.tspan, sim_obs_norm[1], color_orig, linewidth=2)
plt.plot(solver.tspan, sim_est_obs_norm[1], color_est, linewidth=2)
plt.ylabel('Td / Fraction of\nreleased Smac', multialignment='center')
plt.axis([0, 20000, -0.2, 1.2])
plt.subplot(313)
plt.errorbar(exp_data['Time'], exp_data['norm_ECRP'],
yerr=exp_data['nrm_var_ECRP']**0.5, c=color_data, linewidth=2,
elinewidth=0.5)
plt.plot(solver.tspan, sim_obs_norm[2], color_orig, linewidth=2)
plt.plot(solver.tspan, sim_est_obs_norm[2], color_est, linewidth=2)
plt.ylabel('Fraction of\ncleaved EC-RP/PARP', multialignment='center')
plt.xlabel('Time (s)')
plt.axis([0, 20000, -0.2, 1.2])
plt.show()
if __name__ == '__main__':
params_estimated = None
try:
earm_path = sys.modules['earm'].__path__[0]
fit_file = os.path.join(earm_path, '..', 'EARM_2_0_M1a_fitted_params.txt')
params_estimated = np.genfromtxt(fit_file)[:,1].copy()
except IOError:
pass
if params_estimated is None:
np.random.seed(1)
params_estimated = estimate()
display(params_estimated)
| bsd-2-clause |
glemaitre/UnbalancedDataset | imblearn/ensemble/classifier.py | 2 | 9929 | """Ensemble predictors combining a sampler and a classifier."""
# Authors: Guillaume Lemaitre <[email protected]>
# Christos Aridas
# License: MIT
import numbers
import numpy as np
from sklearn.base import clone
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble.bagging import _generate_bagging_indices
from ..pipeline import Pipeline
from ..under_sampling import RandomUnderSampler
old_generate = _generate_bagging_indices
class BalancedBaggingClassifier(BaggingClassifier):
"""A Bagging classifier with additional balancing.
This implementation of Bagging is similar to the scikit-learn
implementation. It includes an additional step to balance the training set
at fit time using a ``RandomUnderSampler``.
Read more in the :ref:`User Guide <ensemble_meta_estimators>`.
Parameters
----------
base_estimator : object or None, optional (default=None)
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a decision tree.
n_estimators : int, optional (default=10)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default=1.0)
The number of samples to draw from X to train each base estimator.
- If int, then draw ``max_samples`` samples.
- If float, then draw ``max_samples * X.shape[0]`` samples.
max_features : int or float, optional (default=1.0)
The number of features to draw from X to train each base estimator.
- If int, then draw ``max_features`` features.
- If float, then draw ``max_features * X.shape[1]`` features.
bootstrap : boolean, optional (default=True)
Whether samples are drawn with replacement.
bootstrap_features : boolean, optional (default=False)
Whether features are drawn with replacement.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
warm_start : bool, optional (default=False)
When set to True, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit
a whole new ensemble.
.. versionadded:: 0.17
*warm_start* constructor parameter.
ratio : str, dict, or callable, optional (default='auto')
Ratio to use for resampling the data set.
- If ``str``, has to be one of: (i) ``'minority'``: resample the
minority class; (ii) ``'majority'``: resample the majority class,
(iii) ``'not minority'``: resample all classes apart of the minority
class, (iv) ``'all'``: resample all classes, and (v) ``'auto'``:
correspond to ``'all'`` with for over-sampling methods and ``'not
minority'`` for under-sampling methods. The classes targeted will be
over-sampled or under-sampled to achieve an equal number of sample
with the majority or minority class.
- If ``dict``, the keys correspond to the targeted classes. The values
correspond to the desired number of samples.
- If callable, function taking ``y`` and returns a ``dict``. The keys
correspond to the targeted classes. The values correspond to the
desired number of samples.
replacement : bool, optional (default=False)
Whether or not to sample randomly with replacement or not.
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
- If int, ``random_state`` is the seed used by the random number
generator;
- If ``RandomState`` instance, random_state is the random
number generator;
- If ``None``, the random number generator is the
``RandomState`` instance used by ``np.random``.
verbose : int, optional (default=0)
Controls the verbosity of the building process.
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
estimators_ : list of estimators
The collection of fitted base estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator. Each subset is defined by a boolean mask.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
classes_ : array, shape (n_classes,)
The classes labels.
n_classes_ : int or list
The number of classes.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : ndarray, shape (n_samples, n_classes)
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
``oob_decision_function_`` might contain NaN.
Notes
-----
    It is possible to turn this classifier into a balanced random forest [5]_
by passing a :class:`sklearn.tree.DecisionTreeClassifier` with
`max_features='auto'` as a base estimator.
See
:ref:`sphx_glr_auto_examples_ensemble_plot_comparison_bagging_classifier.py`.
See also
--------
BalanceCascade, EasyEnsemble
References
----------
.. [1] L. Breiman, "Pasting small votes for classification in large
databases and on-line", Machine Learning, 36(1), 85-103, 1999.
.. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1996.
.. [3] T. Ho, "The random subspace method for constructing decision
forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1998.
.. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
Learning and Knowledge Discovery in Databases, 346-361, 2012.
.. [5] Chen, Chao, Andy Liaw, and Leo Breiman. "Using random forest to
learn imbalanced data." University of California, Berkeley 110,
2004.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.metrics import confusion_matrix
>>> from imblearn.ensemble import \
BalancedBaggingClassifier # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=0)
>>> bbc = BalancedBaggingClassifier(random_state=42)
>>> bbc.fit(X_train, y_train) # doctest: +ELLIPSIS
BalancedBaggingClassifier(...)
>>> y_pred = bbc.predict(X_test)
>>> print(confusion_matrix(y_test, y_pred))
[[ 23 0]
[ 2 225]]
"""
def __init__(self,
base_estimator=None,
n_estimators=10,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
oob_score=False,
warm_start=False,
ratio='auto',
replacement=False,
n_jobs=1,
random_state=None,
verbose=0):
super(BaggingClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
bootstrap=bootstrap,
bootstrap_features=bootstrap_features,
oob_score=oob_score,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.ratio = ratio
self.replacement = replacement
def _validate_estimator(self, default=DecisionTreeClassifier()):
"""Check the estimator and the n_estimator attribute, set the
`base_estimator_` attribute."""
if not isinstance(self.n_estimators, (numbers.Integral, np.integer)):
raise ValueError("n_estimators must be an integer, "
"got {0}.".format(type(self.n_estimators)))
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than zero, "
"got {0}.".format(self.n_estimators))
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = clone(default)
self.base_estimator_ = Pipeline(
[('sampler', RandomUnderSampler(ratio=self.ratio,
replacement=self.replacement)),
('classifier', base_estimator)])
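    # Editorial note: every bagged estimator is therefore a small Pipeline that
    # first re-balances its bootstrap sample with RandomUnderSampler (using the
    # `ratio` and `replacement` settings) and then fits a clone of the base
    # classifier -- a DecisionTreeClassifier unless another estimator is given.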
def fit(self, X, y):
"""Build a Bagging ensemble of estimators from the training
set (X, y).
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
Returns
-------
self : object
Returns self.
"""
# RandomUnderSampler is not supporting sample_weight. We need to pass
# None.
return self._fit(X, y, self.max_samples, sample_weight=None)
| mit |
RomainBrault/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 55 | 2433 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
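# Only two samples are labeled at this point: the first one (outer circle,
# label 0) and the last one (inner circle, label 1); every other entry is -1,
# which LabelSpreading treats as "unlabeled".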
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plt.scatter(X[labels == outer, 0], X[labels == outer, 1], color='navy',
marker='s', lw=0, label="outer labeled", s=10)
plt.scatter(X[labels == inner, 0], X[labels == inner, 1], color='c',
marker='s', lw=0, label='inner labeled', s=10)
plt.scatter(X[labels == -1, 0], X[labels == -1, 1], color='darkorange',
marker='.', label='unlabeled')
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Raw data (2 classes=outer and inner)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plt.scatter(X[outer_numbers, 0], X[outer_numbers, 1], color='navy',
marker='s', lw=0, s=10, label="outer learned")
plt.scatter(X[inner_numbers, 0], X[inner_numbers, 1], color='c',
marker='s', lw=0, s=10, label="inner learned")
plt.legend(scatterpoints=1, shadow=False, loc='upper right')
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
cmeessen/fatiando | doc/conf.py | 5 | 5652 | # -*- coding: utf-8 -*-
import sys
import os
import datetime
import sphinx_bootstrap_theme
import matplotlib as mpl
mpl.use("Agg")
# Sphinx needs to be able to import fatiando to use autodoc
sys.path.append(os.path.pardir)
from fatiando import __version__, __commit__
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
'matplotlib.sphinxext.plot_directive',
'sphinx_gallery.gen_gallery',
]
# Produce pages for each class and function
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Configure the sphinx-gallery plugin
sphinx_gallery_conf = {
'examples_dirs': ['../gallery'],
'gallery_dirs': ['gallery'],
'filename_pattern': os.sep + '*', # Match any .py file
'backreferences_dir': False,
}
# Configure the inline plots from matplotlib plot_directive
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
# Sphinx project configuration
templates_path = ['_templates']
exclude_patterns = ['_build']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
master_doc = 'index'
# General information about the project
year = datetime.date.today().year
project = u'Fatiando a Terra'
copyright = u'2010-{:d}, Leonardo Uieda'.format(year)
if len(__version__.split('-')) > 1 or __version__ == 'unknown':
version = 'dev'
else:
version = __version__
# I'll use the release to place the commit hash at the footer of the site
release = __commit__.split('-')[0] # Get rid of -dirty
doi = '10.6084/m9.figshare.1115194'
# These enable substitutions using |variable| in the rst files
rst_epilog = """
.. |doi| replace:: {doi}
.. |doilink| replace:: doi:`{doi} <http://dx.doi.org/{doi}>`__
.. |year| replace:: {year}
""".format(doi=doi, year=year)
html_last_updated_fmt = '%b %d, %Y'
html_title = 'Fatiando {}'.format(version)
html_short_title = 'Fatiando a Terra'
html_logo = '_static/fatiando-navbar-logo.png'
html_favicon = u'favicon.ico'
html_static_path = ['_static']
html_extra_path = ['.nojekyll', 'CNAME']
html_use_smartypants = True
pygments_style = 'default'
add_function_parentheses = False
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'install': ['localtoc.html'],
'develop': ['localtoc.html'],
'cookbook': ['localtoc.html'],
'changelog': ['localtoc.html'],
'api/**': ['localtoc.html'],
'api': ['localtoc.html'],
'gallery/index': ['localtoc.html'],
'use_cases': ['localtoc.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FatiandoATerraDoc'
# Theme config
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme_options = {
'bootswatch_theme': "flatly",
'navbar_title': 'fatiando',
'navbar_site_name': "Site",
'navbar_links': [
("Install", "install"),
("Gallery", "gallery/index"),
# ("Tutorials", ""),
("API", "api"),
("Docs", "docs"),
("Contribute", "develop"),
("Cite", "cite"),
('<i class="fa fa-github fa-lg" title="Source code on Github"></i>', "https://github.com/fatiando/fatiando", True),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': False,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "This page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 1,
# Include hidden TOCs in Site navbar?
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
# Values: "true" (default) or "false"
'globaltoc_includehidden': "false",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar navbar-default",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "false",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "footer",
'bootstrap_version': "3",
}
# Load the custom CSS files (needs sphinx >= 1.6 for this to work)
def setup(app):
app.add_stylesheet("style.css")
app.add_stylesheet("font-awesome/css/font-awesome.css")
| bsd-3-clause |
balazssimon/ml-playground | udemy/lazyprogrammer/deep-reinforcement-learning-python/atari/dqn_tf.py | 1 | 8743 | import copy
import gym
import os
import sys
import random
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from gym import wrappers
from datetime import datetime
from scipy.misc import imresize
##### testing only
MAX_EXPERIENCES = 10000
MIN_EXPERIENCES = 1000
#MAX_EXPERIENCES = 500000
#MIN_EXPERIENCES = 50000
TARGET_UPDATE_PERIOD = 10000
IM_SIZE = 80
K = 4 #env.action_space.n
def downsample_image(A):
B = A[31:195] # select the important parts of the image
B = B.mean(axis=2) # convert to grayscale
# downsample image
# changing aspect ratio doesn't significantly distort the image
# nearest neighbor interpolation produces a much sharper image
# than default bilinear
B = imresize(B, size=(IM_SIZE, IM_SIZE), interp='nearest')
return B
def update_state(state, obs):
obs_small = downsample_image(obs)
return np.append(state[1:], np.expand_dims(obs_small, 0), axis=0)
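# Shape bookkeeping: `state` is a (4, IM_SIZE, IM_SIZE) stack of the last four
# downsampled frames; update_state drops the oldest frame (state[0]) and
# appends the new 80x80 observation, keeping the frames in chronological order.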
class DQN:
def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma, scope):
self.K = K
self.scope = scope
with tf.variable_scope(scope):
# inputs and targets
self.X = tf.placeholder(tf.float32, shape=(None, 4, IM_SIZE, IM_SIZE), name='X')
# tensorflow convolution needs the order to be:
# (num_samples, height, width, "color")
# so we need to tranpose later
self.G = tf.placeholder(tf.float32, shape=(None,), name='G')
self.actions = tf.placeholder(tf.int32, shape=(None,), name='actions')
# calculate output and cost
# convolutional layers
# these built-in layers are faster and don't require us to
# calculate the size of the output of the final conv layer!
Z = self.X / 255.0
Z = tf.transpose(Z, [0, 2, 3, 1])
for num_output_filters, filtersz, poolsz in conv_layer_sizes:
Z = tf.contrib.layers.conv2d(
Z,
num_output_filters,
filtersz,
poolsz,
activation_fn=tf.nn.relu
)
# fully connected layers
Z = tf.contrib.layers.flatten(Z)
for M in hidden_layer_sizes:
Z = tf.contrib.layers.fully_connected(Z, M)
# final output layer
self.predict_op = tf.contrib.layers.fully_connected(Z, K)
selected_action_values = tf.reduce_sum(
self.predict_op * tf.one_hot(self.actions, K),
reduction_indices=[1]
)
cost = tf.reduce_mean(tf.square(self.G - selected_action_values))
# self.train_op = tf.train.AdamOptimizer(1e-2).minimize(cost)
# self.train_op = tf.train.AdagradOptimizer(1e-2).minimize(cost)
# self.train_op = tf.train.RMSPropOptimizer(2.5e-4, decay=0.99, epsilon=1e-3).minimize(cost)
self.train_op = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6).minimize(cost)
# self.train_op = tf.train.MomentumOptimizer(1e-3, momentum=0.9).minimize(cost)
# self.train_op = tf.train.GradientDescentOptimizer(1e-4).minimize(cost)
self.cost = cost
def copy_from(self, other):
mine = [t for t in tf.trainable_variables() if t.name.startswith(self.scope)]
mine = sorted(mine, key=lambda v: v.name)
theirs = [t for t in tf.trainable_variables() if t.name.startswith(other.scope)]
theirs = sorted(theirs, key=lambda v: v.name)
ops = []
for p, q in zip(mine, theirs):
actual = self.session.run(q)
op = p.assign(actual)
ops.append(op)
self.session.run(ops)
def set_session(self, session):
self.session = session
def predict(self, states):
return self.session.run(self.predict_op, feed_dict={self.X: states})
def update(self, states, actions, targets):
c, _ = self.session.run(
[self.cost, self.train_op],
feed_dict={
self.X: states,
self.G: targets,
self.actions: actions
}
)
return c
def sample_action(self, x, eps):
if np.random.random() < eps:
return np.random.choice(self.K)
else:
return np.argmax(self.predict([x])[0])
def learn(model, target_model, experience_replay_buffer, gamma, batch_size):
# Sample experiences
samples = random.sample(experience_replay_buffer, batch_size)
states, actions, rewards, next_states, dones = map(np.array, zip(*samples))
# Calculate targets
next_Qs = target_model.predict(next_states)
next_Q = np.amax(next_Qs, axis=1)
targets = rewards + np.invert(dones).astype(np.float32) * gamma * next_Q
# Update model
loss = model.update(states, actions, targets)
return loss
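# Illustrative target computation (made-up numbers): for a transition with
# reward=1.0, done=False, gamma=0.99 and max_a Q_target(s', a) = 2.0 the target
# is 1.0 + 0.99 * 2.0 = 2.98; when done=True the np.invert(dones) factor zeroes
# the bootstrap term and the target is just the reward.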
def play_one(
env,
total_t,
experience_replay_buffer,
model,
target_model,
gamma,
batch_size,
epsilon,
epsilon_change,
epsilon_min):
t0 = datetime.now()
# Reset the environment
obs = env.reset()
obs_small = downsample_image(obs)
state = np.stack([obs_small] * 4, axis=0)
assert(state.shape == (4, 80, 80))
loss = None
total_time_training = 0
num_steps_in_episode = 0
episode_reward = 0
done = False
while not done:
# Update target network
if total_t % TARGET_UPDATE_PERIOD == 0:
target_model.copy_from(model)
print("Copied model parameters to target network. total_t = %s, period = %s" % (total_t, TARGET_UPDATE_PERIOD))
# Take action
action = model.sample_action(state, epsilon)
obs, reward, done, _ = env.step(action)
obs_small = downsample_image(obs)
next_state = np.append(state[1:], np.expand_dims(obs_small, 0), axis=0)
# assert(state.shape == (4, 80, 80))
episode_reward += reward
# Remove oldest experience if replay buffer is full
if len(experience_replay_buffer) == MAX_EXPERIENCES:
experience_replay_buffer.pop(0)
# Save the latest experience
experience_replay_buffer.append((state, action, reward, next_state, done))
# Train the model, keep track of time
t0_2 = datetime.now()
loss = learn(model, target_model, experience_replay_buffer, gamma, batch_size)
dt = datetime.now() - t0_2
total_time_training += dt.total_seconds()
num_steps_in_episode += 1
state = next_state
total_t += 1
epsilon = max(epsilon - epsilon_change, epsilon_min)
return total_t, episode_reward, (datetime.now() - t0), num_steps_in_episode, total_time_training/num_steps_in_episode, epsilon
if __name__ == '__main__':
# hyperparams and initialize stuff
conv_layer_sizes = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
hidden_layer_sizes = [512]
gamma = 0.99
batch_sz = 32
num_episodes = 2
total_t = 0
experience_replay_buffer = []
episode_rewards = np.zeros(num_episodes)
# epsilon
# decays linearly until 0.1
epsilon = 1.0
epsilon_min = 0.1
epsilon_change = (epsilon - epsilon_min) / 500000
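    # With these settings epsilon decays linearly from 1.0 to 0.1 over 500000
    # steps (epsilon_change = 0.9 / 500000 = 1.8e-06 per step) and then stays
    # at epsilon_min.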
# Create environment
env = gym.envs.make("Breakout-v0")
# Create models
model = DQN(
K=K,
conv_layer_sizes=conv_layer_sizes,
hidden_layer_sizes=hidden_layer_sizes,
gamma=gamma,
scope="model")
target_model = DQN(
K=K,
conv_layer_sizes=conv_layer_sizes,
hidden_layer_sizes=hidden_layer_sizes,
gamma=gamma,
scope="target_model"
)
with tf.Session() as sess:
model.set_session(sess)
target_model.set_session(sess)
sess.run(tf.global_variables_initializer())
print("Populating experience replay buffer...")
obs = env.reset()
obs_small = downsample_image(obs)
state = np.stack([obs_small] * 4, axis=0)
# assert(state.shape == (4, 80, 80))
for i in range(MIN_EXPERIENCES):
action = np.random.choice(K)
obs, reward, done, _ = env.step(action)
next_state = update_state(state, obs)
# assert(state.shape == (4, 80, 80))
experience_replay_buffer.append((state, action, reward, next_state, done))
if done:
obs = env.reset()
obs_small = downsample_image(obs)
state = np.stack([obs_small] * 4, axis=0)
# assert(state.shape == (4, 80, 80))
else:
state = next_state
# Play a number of episodes and learn!
for i in range(num_episodes):
total_t, episode_reward, duration, num_steps_in_episode, time_per_step, epsilon = play_one(
env,
total_t,
experience_replay_buffer,
model,
target_model,
gamma,
batch_sz,
epsilon,
epsilon_change,
epsilon_min,
)
episode_rewards[i] = episode_reward
last_100_avg = episode_rewards[max(0, i - 100):i + 1].mean()
print("Episode:", i,
"Duration:", duration,
"Num steps:", num_steps_in_episode,
"Reward:", episode_reward,
"Training time per step:", "%.3f" % time_per_step,
"Avg Reward (Last 100):", "%.3f" % last_100_avg,
"Epsilon:", "%.3f" % epsilon
)
sys.stdout.flush()
| apache-2.0 |
glucksfall/pleiades | bbrc/scripts/parallel_kasim.py | 1 | 1616 | import glob
import shlex
import subprocess
import multiprocessing
import pandas
from pandas import DataFrame
def precompile_kappa():
cmd = 'KaSim -i test_model.kappa -e 0 -p 0 -make-sim test_model.bin -o data.out --batch'
process = subprocess.Popen(shlex.split(cmd), shell = False)
process.wait()
cmd = 'rm -f ./profiling.html data.out'
process = subprocess.Popen(shlex.split(cmd), shell = False)
process.wait()
return
def run_kasim_precompiled(num):
cmd = 'KaSim -load-sim test_model.bin -t 1000 -p 1000 -o test_model.{:03d}.out.txt --batch'.format(num)
process = subprocess.Popen(shlex.split(cmd), shell = False)
process.wait()
return
def read_data(file):
return pandas.read_csv(file, delimiter = ' ', header = 1)
def statistics(data):
rows = range(len(data[0].axes[0]))
cols = list(data[0].columns.values)
avrg = pandas.DataFrame(index = rows, columns = cols).fillna(0)
stdv = pandas.DataFrame(index = rows, columns = cols).fillna(0)
for i in range(0, len(data)):
avrg += data[i].divide(len(data))
DataFrame.to_csv(avrg, path_or_buf = './test_model.avrg.txt', sep = ' ', index = False, float_format = '%.3f')
for i in range(0, len(data)):
stdv += (((data[i] - avrg)**2).divide(len(data)-1))
DataFrame.to_csv(stdv**(0.5), path_or_buf = './test_model.stdv.txt', sep = ' ', index = False, float_format = '%.3f')
return
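# Editorial note: `avrg` is the element-wise mean over all simulation outputs
# and the saved `stdv**(0.5)` is the sample standard deviation (note the
# 1/(len(data)-1) normalisation), computed column by column across replicates.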
if __name__ == '__main__':
precompile_kappa()
sims = multiprocessing.Pool(multiprocessing.cpu_count()-1).map(run_kasim_precompiled, range(0, 1000))
data = multiprocessing.Pool(multiprocessing.cpu_count()-1).map(read_data, glob.glob("*.out.txt"))
avrg = statistics(data)
| gpl-3.0 |
sonnyhu/scikit-learn | build_tools/cythonize.py | 42 | 6375 | #!/usr/bin/env python
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'sklearn'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script detects changes in the pyx/pxd files using checksums
[or hashes] stored in a database file
Simple script to invoke Cython on all .pyx
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
It is called by ./setup.py sdist so that the sdist package can be installed
without Cython.
Originally written by Dag Sverre Seljebotn, and adapted from statsmodel 0.6.1
(Modified BSD 3-clause)
We copied it for scikit-learn.
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files or their corresponding Cython header (.pxd)
files.
"""
# Author: Arthur Mensch <[email protected]>
# Author: Raghav R V <[email protected]>
#
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import hashlib
import subprocess
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'sklearn'
# WindowsError is not defined on unix systems
try:
WindowsError
except NameError:
WindowsError = None
def cythonize(cython_file, gen_file):
try:
from Cython.Compiler.Version import version as cython_version
from distutils.version import LooseVersion
if LooseVersion(cython_version) < LooseVersion('0.21'):
raise Exception('Building scikit-learn requires Cython >= 0.21')
except ImportError:
pass
flags = ['--fast-fail']
if gen_file.endswith('.cpp'):
flags += ['--cplus']
try:
try:
rc = subprocess.call(['cython'] +
flags + ["-o", gen_file, cython_file])
if rc != 0:
raise Exception('Cythonizing %s failed' % cython_file)
except OSError:
# There are ways of installing Cython that don't result in a cython
# executable on the path, see scipy issue gh-2397.
rc = subprocess.call([sys.executable, '-c',
'import sys; from Cython.Compiler.Main '
'import setuptools_main as main;'
' sys.exit(main())'] + flags +
["-o", gen_file, cython_file])
if rc != 0:
raise Exception('Cythonizing %s failed' % cython_file)
except OSError:
raise OSError('Cython needs to be installed')
def load_hashes(filename):
"""Load the hashes dict from the hashfile"""
# { filename : (sha1 of header if available or 'NA',
# sha1 of input,
# sha1 of output) }
hashes = {}
try:
with open(filename, 'r') as cython_hash_file:
for hash_record in cython_hash_file:
(filename, header_hash,
cython_hash, gen_file_hash) = hash_record.split()
hashes[filename] = (header_hash, cython_hash, gen_file_hash)
except (KeyError, ValueError, AttributeError, IOError):
hashes = {}
return hashes
def save_hashes(hashes, filename):
"""Save the hashes dict to the hashfile"""
with open(filename, 'w') as cython_hash_file:
for key, value in hashes.items():
cython_hash_file.write("%s %s %s %s\n"
% (key, value[0], value[1], value[2]))
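# Hash-file format sketch (hypothetical entry, not a real record): each line is
# "<filename> <header sha1 or NA> <pyx sha1> <generated file sha1 or NA>", e.g.
# "sklearn/foo.pyx NA 3b5d... 9a1c...", which is exactly what save_hashes()
# writes and load_hashes() parses back.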
def sha1_of_file(filename):
h = hashlib.sha1()
with open(filename, "rb") as f:
h.update(f.read())
return h.hexdigest()
def clean_path(path):
"""Clean the path"""
path = path.replace(os.sep, '/')
if path.startswith('./'):
path = path[2:]
return path
def get_hash_tuple(header_path, cython_path, gen_file_path):
"""Get the hashes from the given files"""
header_hash = (sha1_of_file(header_path)
if os.path.exists(header_path) else 'NA')
from_hash = sha1_of_file(cython_path)
to_hash = (sha1_of_file(gen_file_path)
if os.path.exists(gen_file_path) else 'NA')
return header_hash, from_hash, to_hash
def cythonize_if_unchanged(path, cython_file, gen_file, hashes):
full_cython_path = os.path.join(path, cython_file)
full_header_path = full_cython_path.replace('.pyx', '.pxd')
full_gen_file_path = os.path.join(path, gen_file)
current_hash = get_hash_tuple(full_header_path, full_cython_path,
full_gen_file_path)
if current_hash == hashes.get(clean_path(full_cython_path)):
print('%s has not changed' % full_cython_path)
return
print('Processing %s' % full_cython_path)
cythonize(full_cython_path, full_gen_file_path)
# changed target file, recompute hash
current_hash = get_hash_tuple(full_header_path, full_cython_path,
full_gen_file_path)
# Update the hashes dict with the new hash
hashes[clean_path(full_cython_path)] = current_hash
def check_and_cythonize(root_dir):
print(root_dir)
hashes = load_hashes(HASH_FILE)
for cur_dir, dirs, files in os.walk(root_dir):
for filename in files:
if filename.endswith('.pyx'):
gen_file_ext = '.c'
# Cython files with libcpp imports should be compiled to cpp
with open(os.path.join(cur_dir, filename), 'rb') as f:
data = f.read()
m = re.search(b"libcpp", data, re.I | re.M)
if m:
gen_file_ext = ".cpp"
cython_file = filename
gen_file = filename.replace('.pyx', gen_file_ext)
cythonize_if_unchanged(cur_dir, cython_file, gen_file, hashes)
# Save hashes once per module. This prevents cythonizing prev.
# files again when debugging broken code in a single file
save_hashes(hashes, HASH_FILE)
def main(root_dir=DEFAULT_ROOT):
check_and_cythonize(root_dir)
if __name__ == '__main__':
try:
root_dir_arg = sys.argv[1]
except IndexError:
root_dir_arg = DEFAULT_ROOT
main(root_dir_arg)
| bsd-3-clause |
dfm/kpsf | test2.py | 1 | 4612 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = []
import kplr
import numpy as np
from simplexy import simplexy
import matplotlib.pyplot as pl
from kpsf import TimeSeries, PSF
client = kplr.API()
tpf = client.k2_star(202137899).get_target_pixel_files()[0]
data = tpf.read()
times = data["TIME"]
flux_images = data["FLUX"]
ferr_images = data["FLUX_ERR"]
quality = data["QUALITY"]
flux_images[flux_images == 0.0] = np.nan
psf = PSF(0.25, 0.25, 0.0)
psf.add_component(-1.5, 1.5, 1.5, -0.5, dx=0.05, dy=0.25)
# for i in range(2):
# psf.add_component(-5.5-4.*i, 4.+2*i, 4.+2*i)
print((len(psf.pars) + 3) // 6)
print(psf(0, 0))
blah = []
n1, n2 = -45, -10
s = range(n1, n2)
psf = PSF(0.25, 0.25, 0.0)
psf.add_component(-1.5, 1.5, 1.5, -0.5, dx=0.05, dy=0.25)
# for i in range(2):
# psf.add_component(-5.5-4.*i, 4.+2*i, 4.+2*i)
ts = TimeSeries(times[s], flux_images[s], ferr_images[s],
quality[s], saddle=0.1)
results = ts.solve(psf, fit_response=0)
# results = ts.solve(psf, fit_psf=0, response_strength=1e5)
assert 0
pl.figure(figsize=(10, 10))
for i, j in enumerate(np.arange(len(ts.frames))[ts.good_times]):
frame = ts.frames[j]
img = frame.predict(psf, origin=ts.origin[j], offsets=ts.offsets,
response=ts.response)
shape = frame.shape
pos = ts.offsets[:, None, :] + ts.origin[j][None, :, :]
pl.clf()
pl.subplot(221)
pl.imshow(np.log(img.T), cmap="gray", interpolation="nearest")
pl.plot(pos[:, :, 0], pos[:, :, 1], "+r")
pl.xlim(-0.5, shape[0]-0.5)
pl.ylim(-0.5, shape[1]-0.5)
pl.subplot(222)
pl.imshow(np.log(frame.img.T), cmap="gray", interpolation="nearest")
pl.plot(pos[:, :, 0], pos[:, :, 1], "+r")
pl.xlim(-0.5, shape[0]-0.5)
pl.ylim(-0.5, shape[1]-0.5)
pl.subplot(223)
pl.imshow(((img - frame.img) / img).T, cmap="gray",
interpolation="nearest")
pl.colorbar()
pl.plot(pos[:, :, 0], pos[:, :, 1], "+r")
pl.xlim(-0.5, shape[0]-0.5)
pl.ylim(-0.5, shape[1]-0.5)
pl.subplot(224)
pl.imshow(ts.response.T, cmap="gray",
interpolation="nearest")
pl.plot(pos[:, :, 0], pos[:, :, 1], "+r")
pl.xlim(-0.5, shape[0]-0.5)
pl.ylim(-0.5, shape[1]-0.5)
pl.title("{0} {1}".format(i, j))
pl.savefig("frames/{0:05d}.png".format(i))
assert 0
img = flux_images[-1000]
img[img == 0.0] = np.nan
frame = Frame(img)
frame.initialize()
print(frame.coords)
assert 0
obs = flux[-1000]
pixel_mask = np.isfinite(obs)
pixel_mask[pixel_mask] *= (obs[pixel_mask] > 0.0)
tmp = np.array(obs)
tmp[~pixel_mask] = np.median(obs[pixel_mask])
coords = simplexy(tmp)
mu = np.median(tmp)
sig = np.sqrt(np.median((tmp - mu) ** 2))
vmin, vmax = mu - sig, mu + 100 * sig
shape = obs.shape
x, y = np.meshgrid(range(shape[0]), range(shape[1]), indexing="ij")
x = np.array(x[pixel_mask], dtype=np.float64)
y = np.array(y[pixel_mask], dtype=np.float64)
psfpars = np.array([0.25, 0.25, 0.0]
+ [v for j in range(2)
for v in [-2.0 - 100 * j, 0.0, 0.0, 2.0+j, 2.0+j, -0.5]])
norm = compute_psf(psfpars, 0.0, 0.0)
fluxes = np.array(coords["flux"] / norm, dtype=np.float64)
frame_center = np.array([0.0, 0.0])
offsets = np.array([[r["x"], r["y"]] for r in coords], dtype=np.float64)
bkg = np.median(coords["bkg"])
print(fluxes)
response = np.ones_like(x)
img = compute_scene(x, y, fluxes, frame_center, offsets, psfpars, bkg,
response)
# # Update the fluxes.
# A = np.vander(img - bkg, 2)
# ATA = np.dot(A.T, A)
# w = np.linalg.solve(ATA, np.dot(A.T, obs[pixel_mask] - bkg))
# print(w)
# fluxes *= w[0]
# bkg += w[1]
# print(fluxes)
# # Re-compute the prediction.
# img = compute_scene(x, y, fluxes, frame_center, offsets, psfpars, bkg,
# response)
result = np.nan + np.zeros_like(obs)
result[(x.astype(int), y.astype(int))] = img
pl.figure(figsize=(10, 10))
pl.subplot(221)
pl.imshow(result.T, cmap="gray", interpolation="nearest", vmin=vmin, vmax=vmax)
pl.plot(coords["x"], coords["y"], "+r")
pl.xlim(-0.5, shape[0]-0.5)
pl.ylim(-0.5, shape[1]-0.5)
pl.subplot(222)
pl.imshow(obs.T, cmap="gray", interpolation="nearest", vmin=vmin, vmax=vmax)
pl.plot(coords["x"], coords["y"], "+r")
pl.xlim(-0.5, shape[0]-0.5)
pl.ylim(-0.5, shape[1]-0.5)
pl.subplot(223)
dv = 0.5 * (vmax - vmin)
pl.imshow((obs - result).T, cmap="gray", interpolation="nearest",
vmin=-dv, vmax=dv)
pl.plot(coords["x"], coords["y"], "+r")
pl.xlim(-0.5, shape[0]-0.5)
pl.ylim(-0.5, shape[1]-0.5)
pl.savefig("blah.png")
| mit |
linebp/pandas | pandas/tests/frame/test_missing.py | 9 | 26646 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
try:
import scipy
_is_scipy_ge_0190 = scipy.__version__ >= LooseVersion('0.19.0')
except:
_is_scipy_ge_0190 = False
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(TestData):
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
assert (frame['bar'] == 5).all()
inp_frame2.dropna(subset=['bar'], inplace=True)
tm.assert_index_equal(samesize_frame.index, self.frame.index)
tm.assert_index_equal(inp_frame2.index, self.frame.index)
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.loc[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0)
expected = df.loc[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
# threshold
dropped = df.dropna(axis=1, thresh=5)
expected = df.loc[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, thresh=5, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0, thresh=4)
expected = df.loc[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, thresh=4, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=1, thresh=4)
assert_frame_equal(dropped, df)
dropped = df.dropna(axis=1, thresh=3)
assert_frame_equal(dropped, df)
# subset
dropped = df.dropna(axis=0, subset=[0, 1, 3])
inp = df.copy()
inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
assert_frame_equal(dropped, df)
assert_frame_equal(inp, df)
# all
dropped = df.dropna(axis=1, how='all')
assert_frame_equal(dropped, df)
df[2] = nan
dropped = df.dropna(axis=1, how='all')
expected = df.loc[:, [0, 1, 3]]
assert_frame_equal(dropped, expected)
# bad input
pytest.raises(ValueError, df.dropna, axis=3)
def test_drop_and_dropna_caching(self):
        # test that the cacher updates
original = Series([1, 2, np.nan], name='A')
expected = Series([1, 2], dtype=original.dtype, name='A')
df = pd.DataFrame({'A': original.values.copy()})
df2 = df.copy()
df['A'].dropna()
assert_series_equal(df['A'], original)
df['A'].dropna(inplace=True)
assert_series_equal(df['A'], expected)
df2['A'].drop([1])
assert_series_equal(df2['A'], original)
df2['A'].drop([1], inplace=True)
assert_series_equal(df2['A'], original.drop([1]))
def test_dropna_corner(self):
# bad input
pytest.raises(ValueError, self.frame.dropna, how='foo')
pytest.raises(TypeError, self.frame.dropna, how=None)
# non-existent column - 8303
pytest.raises(KeyError, self.frame.dropna, subset=['A', 'X'])
def test_dropna_multiple_axes(self):
df = DataFrame([[1, np.nan, 2, 3],
[4, np.nan, 5, 6],
[np.nan, np.nan, np.nan, np.nan],
[7, np.nan, 8, 9]])
cp = df.copy()
result = df.dropna(how='all', axis=[0, 1])
result2 = df.dropna(how='all', axis=(0, 1))
expected = df.dropna(how='all').dropna(how='all', axis=1)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(df, cp)
inp = df.copy()
inp.dropna(how='all', axis=(0, 1), inplace=True)
assert_frame_equal(inp, expected)
def test_fillna(self):
tf = self.tsframe
tf.loc[tf.index[:5], 'A'] = nan
tf.loc[tf.index[-5:], 'A'] = nan
zero_filled = self.tsframe.fillna(0)
assert (zero_filled.loc[zero_filled.index[:5], 'A'] == 0).all()
padded = self.tsframe.fillna(method='pad')
assert np.isnan(padded.loc[padded.index[:5], 'A']).all()
assert (padded.loc[padded.index[-5:], 'A'] ==
padded.loc[padded.index[-5], 'A']).all()
# mixed type
mf = self.mixed_frame
mf.loc[mf.index[5:20], 'foo'] = nan
mf.loc[mf.index[-10:], 'A'] = nan
result = self.mixed_frame.fillna(value=0)
result = self.mixed_frame.fillna(method='pad')
pytest.raises(ValueError, self.tsframe.fillna)
pytest.raises(ValueError, self.tsframe.fillna, 5, method='ffill')
# mixed numeric (but no float16)
mf = self.mixed_float.reindex(columns=['A', 'B', 'D'])
mf.loc[mf.index[-10:], 'A'] = nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype=dict(C=None))
result = mf.fillna(method='pad')
_check_mixed_float(result, dtype=dict(C=None))
# empty frame (GH #2778)
df = DataFrame(columns=['x'])
for m in ['pad', 'backfill']:
df.x.fillna(method=m, inplace=True)
df.x.fillna(method=m)
# with different dtype (GH3386)
df = DataFrame([['a', 'a', np.nan, 'a'], [
'b', 'b', np.nan, 'b'], ['c', 'c', np.nan, 'c']])
result = df.fillna({2: 'foo'})
expected = DataFrame([['a', 'a', 'foo', 'a'],
['b', 'b', 'foo', 'b'],
['c', 'c', 'foo', 'c']])
assert_frame_equal(result, expected)
df.fillna({2: 'foo'}, inplace=True)
assert_frame_equal(df, expected)
# limit and value
df = DataFrame(np.random.randn(10, 3))
df.iloc[2:7, 0] = np.nan
df.iloc[3:5, 2] = np.nan
expected = df.copy()
expected.iloc[2, 0] = 999
expected.iloc[3, 2] = 999
result = df.fillna(999, limit=1)
assert_frame_equal(result, expected)
# with datelike
# GH 6344
df = DataFrame({
'Date': [pd.NaT, Timestamp("2014-1-1")],
'Date2': [Timestamp("2013-1-1"), pd.NaT]
})
expected = df.copy()
expected['Date'] = expected['Date'].fillna(
df.loc[df.index[0], 'Date2'])
result = df.fillna(value={'Date': df['Date2']})
assert_frame_equal(result, expected)
# with timezone
# GH 15855
df = pd.DataFrame({'A': [pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.NaT]})
exp = pd.DataFrame({'A': [pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')]})
assert_frame_equal(df.fillna(method='pad'), exp)
df = pd.DataFrame({'A': [pd.NaT,
pd.Timestamp('2012-11-11 00:00:00+01:00')]})
exp = pd.DataFrame({'A': [pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')]})
assert_frame_equal(df.fillna(method='bfill'), exp)
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
df = pd.DataFrame({'a': [1., np.nan]})
result = df.fillna(0, downcast='infer')
expected = pd.DataFrame({'a': [1, 0]})
assert_frame_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
df = pd.DataFrame({'a': [1., np.nan]})
result = df.fillna({'a': 0}, downcast='infer')
expected = pd.DataFrame({'a': [1, 0]})
assert_frame_equal(result, expected)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = df.get_dtype_counts().sort_values()
expected = Series({'object': 5})
assert_series_equal(result, expected)
result = df.fillna(1)
expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = result.get_dtype_counts().sort_values()
expected = Series({'int64': 5})
assert_series_equal(result, expected)
# empty block
df = DataFrame(index=lrange(3), columns=['A', 'B'], dtype='float64')
result = df.fillna('nan')
expected = DataFrame('nan', index=lrange(3), columns=['A', 'B'])
assert_frame_equal(result, expected)
# equiv of replace
df = DataFrame(dict(A=[1, np.nan], B=[1., 2.]))
for v in ['', 1, np.nan, 1.0]:
expected = df.replace(np.nan, v)
result = df.fillna(v)
assert_frame_equal(result, expected)
def test_fillna_datetime_columns(self):
# GH 7095
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]},
index=date_range('20130110', periods=3))
result = df.fillna('?')
expected = pd.DataFrame({'A': [-1, -2, '?'],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', '?'],
'D': ['foo2', 'bar2', '?']},
index=date_range('20130110', periods=3))
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': [pd.Timestamp('2013-01-01'),
pd.Timestamp('2013-01-02'), pd.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]},
index=date_range('20130110', periods=3))
result = df.fillna('?')
expected = pd.DataFrame({'A': [-1, -2, '?'],
'B': [pd.Timestamp('2013-01-01'),
pd.Timestamp('2013-01-02'), '?'],
'C': ['foo', 'bar', '?'],
'D': ['foo2', 'bar2', '?']},
index=pd.date_range('20130110', periods=3))
tm.assert_frame_equal(result, expected)
def test_ffill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.ffill(),
self.tsframe.fillna(method='ffill'))
def test_bfill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.bfill(),
self.tsframe.fillna(method='bfill'))
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_fillna_skip_certain_blocks(self):
# don't try to fill boolean, int blocks
df = DataFrame(np.random.randn(10, 4).astype(int))
# it works!
df.fillna(np.nan)
def test_fillna_inplace(self):
df = DataFrame(np.random.randn(10, 4))
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(value=0)
assert expected is not df
df.fillna(value=0, inplace=True)
tm.assert_frame_equal(df, expected)
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(method='ffill')
assert expected is not df
df.fillna(method='ffill', inplace=True)
tm.assert_frame_equal(df, expected)
def test_fillna_dict_series(self):
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]})
result = df.fillna({'a': 0, 'b': 5})
expected = df.copy()
expected['a'] = expected['a'].fillna(0)
expected['b'] = expected['b'].fillna(5)
assert_frame_equal(result, expected)
# it works
result = df.fillna({'a': 0, 'b': 5, 'd': 7})
# Series treated same as dict
result = df.fillna(df.max())
expected = df.fillna(df.max().to_dict())
assert_frame_equal(result, expected)
# disable this for now
with tm.assert_raises_regex(NotImplementedError,
'column by column'):
df.fillna(df.max(1), axis=1)
def test_fillna_dataframe(self):
# GH 8377
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]},
index=list('VWXYZ'))
# df2 may have different index and columns
df2 = DataFrame({'a': [nan, 10, 20, 30, 40],
'b': [50, 60, 70, 80, 90],
'foo': ['bar'] * 5},
index=list('VWXuZ'))
result = df.fillna(df2)
# only those columns and indices which are shared get filled
expected = DataFrame({'a': [nan, 1, 2, nan, 40],
'b': [1, 2, 3, nan, 90],
'c': [nan, 1, 2, 3, 4]},
index=list('VWXYZ'))
assert_frame_equal(result, expected)
def test_fillna_columns(self):
df = DataFrame(np.random.randn(10, 10))
df.values[:, ::2] = np.nan
result = df.fillna(method='ffill', axis=1)
expected = df.T.fillna(method='pad').T
assert_frame_equal(result, expected)
df.insert(6, 'foo', 5)
result = df.fillna(method='ffill', axis=1)
expected = df.astype(float).fillna(method='ffill', axis=1)
assert_frame_equal(result, expected)
def test_fillna_invalid_method(self):
with tm.assert_raises_regex(ValueError, 'ffil'):
self.frame.fillna(method='ffil')
def test_fillna_invalid_value(self):
# list
pytest.raises(TypeError, self.frame.fillna, [1, 2])
# tuple
pytest.raises(TypeError, self.frame.fillna, (1, 2))
# frame with series
pytest.raises(ValueError, self.frame.iloc[:, 0].fillna, self.frame)
def test_fillna_col_reordering(self):
cols = ["COL." + str(i) for i in range(5, 0, -1)]
data = np.random.rand(20, 5)
df = DataFrame(index=lrange(20), columns=cols, data=data)
filled = df.fillna(method='ffill')
assert df.columns.tolist() == filled.columns.tolist()
def test_fill_corner(self):
mf = self.mixed_frame
mf.loc[mf.index[5:20], 'foo'] = nan
mf.loc[mf.index[-10:], 'A'] = nan
filled = self.mixed_frame.fillna(value=0)
assert (filled.loc[filled.index[5:20], 'foo'] == 0).all()
del self.mixed_frame['foo']
empty_float = self.frame.reindex(columns=[])
# TODO(wesm): unused?
result = empty_float.fillna(value=0) # noqa
def test_fill_value_when_combine_const(self):
# GH12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float')
df = DataFrame({'foo': dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
assert_frame_equal(res, exp)
class TestDataFrameInterpolate(TestData):
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with pytest.raises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with pytest.raises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
# GH #15662.
# new cubic and quadratic interpolation algorithms from scipy 0.19.0.
# previously `splmake` was used. See scipy/scipy#6710
if _is_scipy_ge_0190:
expected.A.loc[3] = 2.81547781
expected.A.loc[13] = 5.52964175
else:
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
if _is_scipy_ge_0190:
expected.A.loc[3] = 2.82150771
expected.A.loc[13] = 6.12648668
else:
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.loc[2, 'A'] = 3
expected.loc[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.loc[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= '0.17.0':
expected.loc[5, 'A'] = 6.0
else:
expected.loc[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
def test_interp_leading_nans(self):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
tm._skip_if_no_scipy()
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7],
'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with pytest.raises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame({'a': [1., 2., 3., 4.],
'b': [np.nan, 2., 3., 4.],
'c': [3, 2, 2, 2]})
expected = result.interpolate(method='linear', axis=1, inplace=False)
result.interpolate(method='linear', axis=1, inplace=True)
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array(
[1, 2, 3, 4], dtype='float64'),
'B': np.array(
[1, 2, 3, 4], dtype='int64'),
'C': np.array(
[1., 2., 3, 4.], dtype='float64'),
'D': np.array(
[1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
| bsd-3-clause |
HyukjinKwon/spark | python/pyspark/pandas/tests/test_categorical.py | 14 | 16649 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
import pyspark.pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class CategoricalTest(PandasOnSparkTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame(
{
"a": pd.Categorical([1, 2, 3, 1, 2, 3]),
"b": pd.Categorical(
["b", "a", "c", "c", "b", "a"], categories=["c", "b", "d", "a"]
),
},
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
@property
def df_pair(self):
return self.pdf, self.psdf
def test_categorical_frame(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b, pdf.b)
self.assert_eq(psdf.index, pdf.index)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
def test_categorical_series(self):
pser = pd.Series([1, 2, 3], dtype="category")
psser = ps.Series([1, 2, 3], dtype="category")
self.assert_eq(psser, pser)
self.assert_eq(psser.cat.categories, pser.cat.categories)
self.assert_eq(psser.cat.codes, pser.cat.codes)
self.assert_eq(psser.cat.ordered, pser.cat.ordered)
def test_astype(self):
pser = pd.Series(["a", "b", "c"])
psser = ps.from_pandas(pser)
self.assert_eq(psser.astype("category"), pser.astype("category"))
self.assert_eq(
psser.astype(CategoricalDtype(["c", "a", "b"])),
pser.astype(CategoricalDtype(["c", "a", "b"])),
)
pcser = pser.astype(CategoricalDtype(["c", "a", "b"]))
kcser = psser.astype(CategoricalDtype(["c", "a", "b"]))
self.assert_eq(kcser.astype("category"), pcser.astype("category"))
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pcser.astype(CategoricalDtype(["b", "c", "a"])),
)
else:
self.assert_eq(
kcser.astype(CategoricalDtype(["b", "c", "a"])),
pser.astype(CategoricalDtype(["b", "c", "a"])),
)
self.assert_eq(kcser.astype(str), pcser.astype(str))
def test_factorize(self):
pser = pd.Series(["a", "b", "c", None], dtype=CategoricalDtype(["c", "a", "d", "b"]))
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize()
kcodes, kuniques = psser.factorize()
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
pcodes, puniques = pser.factorize(na_sentinel=-2)
kcodes, kuniques = psser.factorize(na_sentinel=-2)
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
def test_frame_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.apply(lambda x: x).sort_index(), pdf.apply(lambda x: x).sort_index())
self.assert_eq(
psdf.apply(lambda x: x, axis=1).sort_index(),
pdf.apply(lambda x: x, axis=1).sort_index(),
)
def test_frame_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_apply()
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c"])
def categorize(ser) -> ps.Series[dtype]:
return ser.astype(dtype)
self.assert_eq(
psdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
pdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_frame_transform(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.transform(lambda x: x), pdf.transform(lambda x: x))
self.assert_eq(psdf.transform(lambda x: x.cat.codes), pdf.transform(lambda x: x.cat.codes))
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.transform(lambda x: x.astype(dtype)).sort_index(),
pdf.transform(lambda x: x.astype(dtype)).sort_index(),
)
def test_frame_transform_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_transform()
pdf, psdf = self.df_pair
def codes(pser) -> ps.Series[np.int8]:
return pser.cat.codes
self.assert_eq(psdf.transform(codes), pdf.transform(codes))
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def to_category(pser) -> ps.Series[dtype]:
return pser.astype(dtype)
self.assert_eq(
psdf.transform(to_category).sort_index(), pdf.transform(to_category).sort_index()
)
def test_series_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.a.apply(lambda x: x).sort_index(), pdf.a.apply(lambda x: x).sort_index()
)
def test_series_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_series_apply()
pdf, psdf = self.df_pair
ret = psdf.a.dtype
def identity(pser) -> ret:
return pser
self.assert_eq(psdf.a.apply(identity).sort_index(), pdf.a.apply(identity).sort_index())
# TODO: The return type is still category.
# def to_str(x) -> str:
# return str(x)
#
# self.assert_eq(
# psdf.a.apply(to_str).sort_index(), pdf.a.apply(to_str).sort_index()
# )
def test_groupby_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.groupby("a").apply(lambda df: df).sort_index(),
pdf.groupby("a").apply(lambda df: df).sort_index(),
)
self.assert_eq(
psdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
pdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
pdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
)
self.assert_eq(
psdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
pdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
)
self.assert_eq(
psdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
pdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
)
# TODO: grouping by a categorical type sometimes preserves unused categories.
# self.assert_eq(
# psdf.groupby("a").apply(len).sort_index(), pdf.groupby("a").apply(len).sort_index(),
# )
def test_groupby_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_groupby_apply()
pdf, psdf = self.df_pair
def identity(df) -> ps.DataFrame[zip(psdf.columns, psdf.dtypes)]:
return df
self.assert_eq(
psdf.groupby("a").apply(identity).sort_values(["a", "b"]).reset_index(drop=True),
pdf.groupby("a").apply(identity).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_groupby_transform(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.groupby("a").transform(lambda x: x).sort_index(),
pdf.groupby("a").transform(lambda x: x).sort_index(),
)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.groupby("a").transform(lambda x: x.astype(dtype)).sort_index(),
pdf.groupby("a").transform(lambda x: x.astype(dtype)).sort_index(),
)
def test_groupby_transform_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_groupby_transform()
pdf, psdf = self.df_pair
def identity(x) -> ps.Series[psdf.b.dtype]: # type: ignore
return x
self.assert_eq(
psdf.groupby("a").transform(identity).sort_values("b").reset_index(drop=True),
pdf.groupby("a").transform(identity).sort_values("b").reset_index(drop=True),
)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def astype(x) -> ps.Series[dtype]:
return x.astype(dtype)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
pdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
)
else:
expected = pdf.groupby("a").transform(astype)
expected["b"] = dtype.categories.take(expected["b"].cat.codes).astype(dtype)
self.assert_eq(
psdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
expected.sort_values("b").reset_index(drop=True),
)
def test_frame_apply_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf.astype(str)).sort_index(),
pdf.astype(str).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf.astype(dtype)).sort_index(),
pdf.astype(dtype).sort_index(),
)
def test_frame_apply_batch_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_apply_batch()
pdf, psdf = self.df_pair
def to_str(pdf) -> 'ps.DataFrame["a":str, "b":str]': # noqa: F405
return pdf.astype(str)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(to_str).sort_values(["a", "b"]).reset_index(drop=True),
to_str(pdf).sort_values(["a", "b"]).reset_index(drop=True),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
ret = ps.DataFrame["a":dtype, "b":dtype]
def to_category(pdf) -> ret:
return pdf.astype(dtype)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(to_category)
.sort_values(["a", "b"])
.reset_index(drop=True),
to_category(pdf).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_frame_transform_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.astype(str)).sort_index(),
pdf.astype(str).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b.cat.codes).sort_index(),
pdf.b.cat.codes.sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.astype(dtype)).sort_index(),
pdf.astype(dtype).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b.astype(dtype)).sort_index(),
pdf.b.astype(dtype).sort_index(),
)
def test_frame_transform_batch_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_transform_batch()
pdf, psdf = self.df_pair
def to_str(pdf) -> 'ps.DataFrame["a":str, "b":str]': # noqa: F405
return pdf.astype(str)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_str).sort_index(),
to_str(pdf).sort_index(),
)
def to_codes(pdf) -> ps.Series[np.int8]:
return pdf.b.cat.codes
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_codes).sort_index(),
to_codes(pdf).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
ret = ps.DataFrame["a":dtype, "b":dtype]
def to_category(pdf) -> ret:
return pdf.astype(dtype)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_category).sort_index(),
to_category(pdf).sort_index(),
)
def to_category(pdf) -> ps.Series[dtype]:
return pdf.b.astype(dtype)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(to_category).sort_index(),
to_category(pdf).rename().sort_index(),
)
def test_series_transform_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(lambda pser: pser.astype(str)).sort_index(),
pdf.a.astype(str).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(lambda pser: pser.astype(dtype)).sort_index(),
pdf.a.astype(dtype).sort_index(),
)
def test_series_transform_batch_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_series_transform_batch()
pdf, psdf = self.df_pair
def to_str(pser) -> ps.Series[str]:
return pser.astype(str)
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(to_str).sort_index(), to_str(pdf.a).sort_index()
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def to_category(pser) -> ps.Series[dtype]:
return pser.astype(dtype)
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(to_category).sort_index(),
to_category(pdf.a).sort_index(),
)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_categorical import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
sarahgrogan/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
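# Usage sketch (illustrative, hedged): a built-in scorer name from the
# ``SCORERS`` dict defined below, or a scorer object, is handed to the
# model-selection helpers through their ``scoring`` parameter. ``X`` and ``y``
# here are placeholders for any feature matrix / target vector.
#
#     from sklearn.svm import LinearSVC
#     from sklearn.cross_validation import cross_val_score
#     scores = cross_val_score(LinearSVC(), X, y, scoring='accuracy')
#     # equivalent, passing the scorer object directly:
#     scores = cross_val_score(LinearSVC(), X, y, scoring=SCORERS['accuracy'])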
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
| bsd-3-clause |
jrosebr1/bat-country | setup.py | 6 | 1106 | from distutils.core import setup
setup(
name='bat-country',
packages=['batcountry'],
version='0.2',
description='A lightweight, extendible, easy to use Python package for deep dreaming and image generation with Caffe and CNNs',
author='Adrian Rosebrock',
author_email='[email protected]',
url='https://github.com/jrosebr1/bat-country',
download_url='https://github.com/jrosebr1/bat-country/tarball/0.1',
license='MIT',
install_requires=[
'Pillow==2.9.0',
'argparse==1.2.1',
'decorator==3.4.2',
'imutils==0.2.2',
'matplotlib==1.4.3',
'mock==1.0.1',
'networkx==1.9.1',
'nose==1.3.7',
'numpy==1.9.2',
'protobuf==2.6.1',
'pyparsing==2.0.3',
'python-dateutil==2.4.2',
'pytz==2015.4',
'scikit-image==0.11.3',
'scipy==0.15.1',
'six==1.9.0',
'wsgiref==0.1.2',
],
keywords=['computer vision', 'machine learning', 'deep learning',
'convolutional neural network', 'deep dream', 'inceptionism'],
classifiers=[],
)
| mit |
Bmillidgework/Misc-Maths | gp.py | 1 | 1528 | # okay, ehre is where I'm going to try to do my gaussian processes beginning in python.
#I'm first going to attempt to do a simple GP prior draw. from there we will move onto inference in toy problems, and hopefully beyond.
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
N = 100
l=0.01
# Build the x grid on [-0.5, 0.5) with spacing 1/N. The loop is simple but inefficient; a vectorized equivalent is checked just below.
xgrid = []
for i in xrange(N):
xgrid.append(-0.5+(1/N)*i)
xgrid = np.array(xgrid)
print xgrid.shape
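# Vectorized equivalent of the grid above (illustrative check only; xgrid itself is left unchanged).
xgrid_vec = np.linspace(-0.5, 0.5, N, endpoint=False)
assert np.allclose(xgrid, xgrid_vec)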
def kernel(x,y,l):
return np.exp(-(x-y)**2/l)
# A very simple squared-exponential (Gaussian) kernel with length-scale parameter l.
def construct_covmatrix(xgrid,l):
sigma = []
for i in xrange(len(xgrid)):
col = []
for j in xrange(len(xgrid)):
col.append(kernel(xgrid[i],xgrid[j],l))
col = np.array(col)
sigma.append(col)
sigma=np.array(sigma)
return sigma
sigma = construct_covmatrix(xgrid,l)
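# The double loop in construct_covmatrix can also be written with numpy broadcasting.
# Illustrative sketch: reproduces the same kernel matrix; the function name is new, everything else is defined above.
def construct_covmatrix_vectorized(x, l):
    diff = x[:, None] - x[None, :]   # pairwise differences, shape (N, N)
    return np.exp(-diff ** 2 / l)    # same pseudogaussian kernel as above
assert np.allclose(sigma, construct_covmatrix_vectorized(xgrid, l))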
def drawGpPrior(N,covmatrix): # we're assuming zero mean for now!
mus = np.zeros(N)
print mus.shape
draw = np.random.multivariate_normal(mus,covmatrix)
return draw
prior = drawGpPrior(N,sigma)
plt.plot(xgrid,prior)
plt.show()
#Wow, this actually works: the draw gives a sensible prior sample. The distribution over such draws of functions is our prior, and the multivariate normal over the grid values (with the kernel covariance) is exactly that distribution.
| mit |
fboers/jumegX | epocher/jumeg_epocher_events.py | 1 | 44756 | '''Class JuMEG_Epocher_Events
Class to extract event/epoch information and save to hdf5
Author:
Frank Boers <[email protected]>
----------------------------------------------------------------
extract mne-events per condition, save to HDF5 file
#--- example via obj:
from jumeg.epocher.jumeg_epocher import jumeg_epocher
epocher= {"template_name": "LDAEP",
"fif_extention": ".fif",
"verbose":True,
"save": True}
 fname = "test.fif"
raw=None
fname,raw,fhdf = jumeg_epocher.apply_events_to_hdf(fname, raw=raw,**epocher)
---> update 10.01.2017 FB
 check event codes/conditions for non-existing ones
'''
import os
import warnings
import numpy as np
import pandas as pd
# import matplotlib.pylab as pl
import mne
from jumeg.jumeg_base import jumeg_base
from jumeg.epocher.jumeg_epocher_hdf import JuMEG_Epocher_HDF
class JuMEG_Epocher_Events(JuMEG_Epocher_HDF):
def __init__ (self):
super(JuMEG_Epocher_Events, self).__init__()
self.__rt_type_list = ['MISSED', 'TOEARLY', 'WRONG', 'HIT']
self.__data_frame_stimulus_cols = ['id','onset','offset']
self.__data_frame_response_cols = ['rt_type','rt','rt_id','rt_onset','rt_offset','rt_index','rt_counts','bads','selected','weighted_selected']
self.__stat_postfix = '-epocher-stats.csv'
self.__idx_bad = -1
#---
def __get_idx_bad(self):
return self.__idx_bad
idx_bad = property(__get_idx_bad)
#---
def __get_data_frame_stimulus_cols(self):
return self.__data_frame_stimulus_cols
def __set_data_frame_stimulus_cols(self,v):
self.__data_frame_stimulus_cols = v
data_frame_stimulus_cols = property(__get_data_frame_stimulus_cols,__set_data_frame_stimulus_cols)
#---
def __get_data_frame_response_cols(self):
return self.__data_frame_response_cols
def __set_data_frame_response_cols(self,v):
self.__data_frame_response_cols = v
data_frame_response_cols = property(__get_data_frame_response_cols,__set_data_frame_response_cols)
#--- rt_type list: 'MISSED', 'TOEARLY', 'WRONG', 'HIT'
def __get_rt_type_list(self):
return self.__rt_type_list
rt_type_list = property(__get_rt_type_list)
#--- rt type index: 'MISSED', 'TOEARLY', 'WRONG', 'HIT'
def rt_type_as_index(self,s):
return self.__rt_type_list.index( s.upper() )
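    # e.g. rt_type_as_index('hit') -> 3, the position of 'HIT' in rt_type_list (see idx_hit below)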
def __get_idx_missed(self):
return self.__rt_type_list.index( 'MISSED')
idx_missed = property(__get_idx_missed)
def __get_idx_toearly(self):
return self.__rt_type_list.index( 'TOEARLY')
idx_toearly = property(__get_idx_toearly)
def __get_idx_wrong(self):
return self.__rt_type_list.index( 'WRONG')
idx_wrong = property(__get_idx_wrong)
def __get_idx_hit(self):
return self.__rt_type_list.index( 'HIT')
idx_hit = property(__get_idx_hit)
#--- events stat file (output as csv)
def __set_stat_postfix(self, v):
self.__stat_postfix = v
def __get_stat_postfix(self):
return self.__stat_postfix
stat_postfix = property(__get_stat_postfix,__set_stat_postfix)
#---
def events_find_events(self,raw,**param):
"""
        finds events via <mne.find_events()> and post-processes the result
input:
raw obj,
e.g. parameters:
{'event_id': 40, 'and_mask': 255,
'events': {'consecutive': True, 'output':'step','stim_channel': 'STI 014',
'min_duration':0.002,'shortest_event': 2,'mask': 0}
}
return:
pandas data-frame with epoch event structure for stimulus or response channel
id : event id
offset : np array with TSL event code offset
onset : np array with TSL event code onset
counts : number of events
bads : np.array with index of bad events
#=> RESPONSE MATCHING
#rt_type : NAN => np array with values MISSED=0 ,TOEARLY=1,WRONG=2,HIT=3
#rt : NAN => np array with reaction time [TSL]
#rt_onset: NAN => np array with response onset [TSL]
#rt_id : NAN => np array with response key / id
dict() with event structure for stimulus or response channel
sfreq : sampling frequency => raw.info['sfreq']
duration : {mean,min,max} in TSL
system_delay_is_applied : True/False
             --> if true, <system_delay_ms> is converted to TSLs and added to the TSLs in onset/offset
                 (TSL => time slices, i.e. samples)
"""
if raw is None:
print "ERROR in <get_event_structure: No raw obj \n"
return None,None
#---
# import pandas as pd done
df = pd.DataFrame(columns = self.data_frame_stimulus_cols)
ev_id_idx = np.array([])
ev_onset = np.array([])
ev_offset = np.array([])
#---
events = param['events'].copy()
events['output'] = 'step'
ev = mne.find_events(raw, **events) #-- return int64
        #--- apply AND mask, e.g. 255 keeps only the lowest 8 bits of the trigger channel
if param['and_mask']:
ev[:, 1:] = np.bitwise_and(ev[:, 1:], param['and_mask'])
ev[:, 2:] = np.bitwise_and(ev[:, 2:], param['and_mask'])
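        # e.g. with and_mask=255: np.bitwise_and(4106, 255) -> 10,
        # i.e. only the lowest 8 bits of the trigger word survive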
ev_onset = np.squeeze( ev[np.where( ev[:,2] ),:]) # > 0
ev_offset = np.squeeze( ev[np.where( ev[:,1] ),:])
if param['event_id']:
ev_id = jumeg_base.str_range_to_numpy(param['event_id'],exclude_zero=True)
#ev_onset = np.squeeze( ev[np.where( np.in1d( ev[:,2],ev_id ) ), :])
#ev_offset = np.squeeze( ev[np.where( np.in1d( ev[:,1],ev_id ) ), :])
#--- check if code in events
if ( ev_id in np.unique(ev[:, 2]) ):
ev_id_idx = np.squeeze( np.where( np.in1d( ev_onset[:,2],ev_id )))
if ( ev_id_idx.size > 0 ):
ev_onset = ev_onset[ ev_id_idx,:]
ev_offset= ev_offset[ev_id_idx,:]
else:
print'Warning => No such event code(s) found (ev_id_idx) -> event: ' + str( param['event_id'] )
return None,None
else:
print'Warning => No such event code(s) found (ev_id) -> event: ' + str(param['event_id'])
return None,None
#---- use all event ids
if ( ev_onset.size == 0 ):
print'Warning => No such event code(s) found -> event: ' + str(param['event_id'])
return None,None
        #--- apply system delay if defined, e.g. an auditory stimulus takes ~20 ms to reach the subject's ears
if param['system_delay_ms']:
system_delay_tsl = raw.time_as_index( param['system_delay_ms']/1000 ) # calc in sec
ev_onset[:, 0] += system_delay_tsl
ev_offset[:, 0]+= system_delay_tsl
system_delay_is_applied = True
else:
system_delay_is_applied = False
#-- avoid invalid index/dimension error if last offset is none
df['id'] = ev_onset[:,2]
df['onset'] = ev_onset[:,0]
df['offset'] = np.zeros( ev_onset[:,0].size,dtype=np.long )
div = np.zeros( ev_offset[:,0].size )
try:
if ( ev_onset[:,0].size >= ev_offset[:,0].size ):
div = ev_offset[:,0] - ev_onset[:ev_offset[:,0].size,0]
df['offset'][:ev_offset[:,0].size] = ev_offset[:,0]
else:
idx_max = ev_offset[:,0].size
div = ev_offset[:,0] - ev_onset[:idx_max,0]
df['offset'][:] = ev_offset[:idx_max,0]
except:
assert "ERROR dims onset offset will not fit\n"
print ev_onset[:,0].size
print ev_offset[:,0].size
return df,dict( {
'sfreq' : raw.info['sfreq'],
'duration' :{'mean':np.rint(div.mean()),'min':div.min(),'max':div.max()},
'system_delay_is_applied' : system_delay_is_applied
} )
#---
def events_response_matching(self,raw,stim_df=None,resp_df=None, **param ):
"""
matching correct responses with respect to <stimulus channel> <output type> (onset,offset)
input:
stim_df = <stimulus channel data frame>
        resp_df = <response channel data frame>
param : parameter dict
e.g.:{ 'response':{'counts':1,'window':[0,0.5],'event_id':2,'include_early_ids':[1,4],'events':{'output':'onset'} },
'stimulus':{'events':{'output'.'onset'}} }
return:
<stimulus data frame> pandas DataFrame obj
with added cols
            rt_type : response type MISSED, TOEARLY, WRONG, HIT
rt : response time as tsl (RT)
rt_id : event id button press code
rt_onset : response onset [tsl]
rt_offset : response offset [tsl]
rt_index : index in response onset / offset
rt_counts : number of responses in <response time window>
bads : flag for bad epochs; e.g. later use in combination with ecg/eog events
"""
#--- ck errors
        err_msg = ""
if raw is None:
err_msg = "ERROR in <apply_response_matching> : no raw obj \n"
if (stim_df is None):
err_msg +="\nERROR no Stimulus-Data-Frame obj. provided\n"
if (resp_df is None):
err_msg +="\nERROR no Response-Data-Frame obj. provided\n"
if (param is None):
err_msg +="\nERROR no Parameter obj. provided\n"
#--- ck RT window range
if ( param['response']['window'][0] >= param['response']['window'][1] ):
err_msg += "ERROR in parameter response windows\n"
if err_msg:
print "ERROR in <apply_response_matching>\n" + err_msg +"\n\n"
return None
#--- extend stimulus data frame
for x in self.data_frame_response_cols :
stim_df[x]= 0 #np.NaN
        #--- convert rt window [s] into tsl (raw.time_as_index expects seconds)
(r_window_tsl_start, r_window_tsl_end ) = raw.time_as_index( param['response']['window'] );
        #--- get response code -> event_id [int or string] as np array
r_event_id = jumeg_base.str_range_to_numpy( param['response']['event_id'] )
#--- ck if any toearly-id is defined, returns None if not
r_event_id_toearly = jumeg_base.str_range_to_numpy( param['response']['include_early_ids'] )
#--- get output type: onset or offset
stim_output_type = param['stimulus']['events']['output']
resp_output_type = param['response']['events']['output']
#--- loop for all stim events
ridx = 0
        #--- get the rt-relevant part of the response df
RESP_TSLs = resp_df[resp_output_type]
for idx in stim_df.index :
st_tsl_onset = stim_df[stim_output_type][idx]
st_window_tsl0 = stim_df[stim_output_type][idx] + r_window_tsl_start
st_window_tsl1 = stim_df[stim_output_type][idx] + r_window_tsl_end
# st_id = stim_df['id'][idx]
            #--- check for TOEARLY responses
toearly_tsl0 = st_tsl_onset
toearly_tsl1 = st_window_tsl0
#--- look for part of response dataframe ...
res_df_early = resp_df[(toearly_tsl0 <= RESP_TSLs ) & ( RESP_TSLs < toearly_tsl1 )]
if res_df_early.index.size > 0 :
               if ( r_event_id_toearly is None ) or ( not res_df_early['id'].isin( r_event_id_toearly ).all() ):
                  ridx = res_df_early.index[0]
                  stim_df.rt_counts[idx] = res_df_early.index.size
stim_df.rt_onset[idx] = resp_df.onset[ridx]
stim_df.rt_offset[idx] = resp_df.offset[ridx]
stim_df.rt_id[idx] = resp_df.id[ridx]
stim_df.rt_index[idx] = ridx
stim_df.rt_type[idx] = self.idx_toearly
continue
#--- find index of responses from window-start till end of res_event_type array [e.g. onset / offset]
# res_df_in = resp_df[ ( st_window_tsl0 <= RESP_TSLs ) & ( RESP_TSLs <= st_window_tsl1) ]
resp_df_in_idx = resp_df[ ( st_window_tsl0 <= RESP_TSLs ) & ( RESP_TSLs <= st_window_tsl1) ].index
#--- MISSED response
           if not resp_df_in_idx.size :  # empty -> MISSED; np.any() would misfire when the only match is row index 0
continue
#--- WRONG or HIT;
# if res_df_in.index.size > 0 :
# ridx = res_df_in.index[0]
if resp_df_in_idx.size > 0 :
ridx = resp_df_in_idx[0]
stim_df.rt_counts[idx] = resp_df_in_idx.size
stim_df.rt_onset[idx] = resp_df.onset[ridx]
stim_df.rt_offset[idx] = resp_df.offset[ridx]
stim_df.rt_type[idx] = self.idx_wrong
stim_df.rt_id[idx] = resp_df.id[ridx]
#stim_df.rt_id[idx] = resp_df.id[resp_df_in_idx].max()
#stim_df.rt_id[idx] = np.bitwise_and(resp_df.id[resp_df_in_idx], 72).max()
stim_df.rt_index[idx] = ridx
#--- HIT; ck number of responses; ck pressed buttons; wrong if more than count
if ( stim_df.rt_counts[idx] <= param['response']['counts'] ):
#if np.all( res_df_in.id.isin( r_event_id ) ) :
if np.all( resp_df.id[ resp_df_in_idx].isin( r_event_id ) ) :
stim_df.rt_type[idx] = self.idx_hit
# if (stim_df.rt_id[idx] <1) and(stim_df.rt_type[idx] == self.idx_wrong):
# print"!!ERROR"
# print resp_df_in_idx
# print resp_df.onset[resp_df_in_idx]
# print resp_df.id[resp_df_in_idx]
# assert "erroe rt"
          #--- more responses than allowed <counts> -> epoch stays WRONG
else:
continue
#--- calc reaction time (rt in tsl)
if stim_output_type == 'offset' :
sto = stim_df.offset
else:
sto = stim_df.onset
if resp_output_type == 'offset' :
rto = stim_df.rt_offset
else:
rto = stim_df.rt_onset
stim_df.rt = rto - sto
stim_df.rt[ (stim_df.rt_type == self.idx_missed) ] = 0
#---
if self.verbose:
for kidx in range( len( self.rt_type_list ) ) :
print "\n\n---> Stimulus DataFrame Type: " + self.rt_type_list[kidx]
ddf = stim_df[ stim_df.rt_type == kidx ]
if ddf.index.size > 0 :
print ddf.describe()
else :
print "---> EMPTY"
print"---------------------------"
print "\n\n"
#import sys
#char = sys.stdin.read(1)
return stim_df
#---
def events_store_to_hdf(self,raw,condition_list=None,overwrite_hdf=False):
"""
find & store epocher data to hdf5:
        -> reading parameters from the epocher template file
        -> find events from raw-obj using mne.find_events
        -> apply response matching if enabled
        -> save results in pandas dataframes & HDF format
input:
raw : raw obj
condition_list: list of conditions to process
select special conditions from epocher template
default: condition_list=None , will process all
overwrite_hdf : flag for overwriting output HDF file
                          default: overwrite_hdf=False
return:
HDF filename
"""
import pandas as pd
#--- init obj
# overwrite_hdf=False
self.hdf_obj_init(raw=raw,overwrite=overwrite_hdf)
#--- condi loop
for condi, param, in self.template_data.iteritems():
#--- check for real condition
if condi == 'default': continue
#--- check for condition in list
if condition_list :
if condi not in condition_list: continue
print '===> start condition: '+ condi
#--- update & merge condi parameter with defaults
parameter = self.template_data['default'].copy()
parameter = self.template_update_and_merge_dict(parameter,param)
#--- stimulus init dict's & dataframes
stimulus_info = dict()
stimulus_data_frame = None
if self.verbose:
print'===>EPOCHER Template: %s Condition:%s' %(self.template_name,condi)
print'find events and epochs, generate epocher output HDF5'
print"\n---> Parameter :"
print parameter
print"\n"
#--- select stimulus channel e.g. "stimulus" -> STIM 014 or "response" ->STIM 013
if parameter['stimulus_channel'] in ['stimulus','response'] :
print"STIMULUS CHANNEL -> find events: "+ condi +" ---> "+ parameter['stimulus_channel']
if self.verbose:
print "---> Stimulus Channel Parameter:"
print parameter[ parameter['stimulus_channel'] ]
print"\n\n"
#--- get stimulus channel epochs from events as pandas data-frame
stimulus_data_frame,stimulus_info = self.events_find_events(raw,**parameter[ parameter['stimulus_channel'] ])
if self.verbose:
print "---> Stimulus Epocher Events Data Frame [stimulus channel]: "+ condi
print stimulus_data_frame
print"\n\n"
if stimulus_data_frame is None: continue
#--- RESPONSE Matching task
#--- match between stimulus and response
        #--- get all response events for condition e.g. button press 4
if parameter['response_matching'] :
print"RESPONSE MATCHING -> matching stimulus & response channel: " + condi
print"stimulus channel : " + parameter['stimulus_channel']
print"response channel : " + parameter['response_channel']
#--- look for all responses => 'event_id' = None
res_param = parameter[ parameter['response_channel'] ].copy()
res_param['event_id'] = None
#--- get epochs from events as pandas data-frame
response_data_frame,response_info = self.events_find_events(raw,**res_param)
if self.verbose:
print "---> Response Epocher Events Data Frame [respnse channel] : " + parameter['response_channel']
print response_data_frame
print"\n\n"
#--- update stimulus epochs with response matching
stimulus_data_frame = self.events_response_matching(raw,stimulus_data_frame,response_data_frame,**parameter )
#--- store dataframe to HDF format
else:
stimulus_data_frame['bads']= np.zeros_like( stimulus_data_frame['onset'],dtype=np.int8 )
#--- for later mark selected epoch as 1
stimulus_data_frame['selected'] = np.zeros_like( stimulus_data_frame['onset'],dtype=np.int8 )
stimulus_data_frame['weighted_selected']= np.zeros_like( stimulus_data_frame['onset'],dtype=np.int8 )
key = '/epocher/'+condi
storer_attrs = {'epocher_parameter': parameter,'info_parameter':stimulus_info}
self.hdf_obj_update_dataframe(stimulus_data_frame.astype(np.int32),key=key,**storer_attrs )
#--- write stat info into hdf and as csv/txt
df_stats = self.events_condition_stats(save=True)
key = '/conditions/statistic/'
storer_attrs = {'epocher_parameter': parameter,'info_parameter':stimulus_info}
self.hdf_obj_update_dataframe(df_stats.astype(np.float32),key=key,**storer_attrs )
fhdf= self.HDFobj.filename
self.HDFobj.close()
print" ---> DONE save epocher data into HDF5 :"
print" --> " + fhdf +"\n\n"
return fhdf
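    #--- usage sketch (illustrative only; assumes a prepared raw obj and a loaded epocher template):
    # fhdf = jumeg_epocher_events.events_store_to_hdf(raw, condition_list=['condition01'], overwrite_hdf=True)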
#--
def events_condition_stats(self,save=False):
"""
return:
<pandas data frame>
"""
import pandas as pd
#--- ck error
if not self.hdf_obj_is_open():
assert "ERROR no HDF obj open\n"
return
#---
#cols = ['EvID','Hit','Wrong', 'TOEarly', 'Missed', 'RTmean','RTmedian','RTstd', 'RTmin', 'RTmax']
cols = ['STID','RTID','Hit','Wrong', 'TOEarly', 'Missed', 'RTmean','RTmedian','RTstd', 'RTmin', 'RTmax']
#Index([u'id', u'onset', u'offset', u'rt_type', u'rt', u'rt_id', u'rt_onset', u'rt_offset', u'rt_index', u'rt_counts', u'bads'], dtype='object')
index_keys= []
for w in self.HDFobj.keys():
if w.startswith('/epocher'):
index_keys.append( w.replace('/epocher', '').replace('/', '') )
df_stat = pd.DataFrame(index=index_keys,columns = cols)
# s = Series(randn(5), index=['a', 'b', 'c', 'd', 'e'])
#d = {'one' : Series([1., 2., 3.], index=['a', 'b', 'c']),
# ....: 'two' : Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
idx = 0
for condi in index_keys:
k='/epocher/'+condi
#print k
#print condi
df = self.HDFobj[k]
# df_stat['EvID'][idx] = condi
#--- get sampling frquency from attrs epocher_info
Hstorer = self.HDFobj.get_storer(k)
try:
sfreq = Hstorer.attrs.info_parameter['sfreq']
except:
sfreq = 1.0
try:
rtm = Hstorer.attrs.epocher_parameter['response_matching']
except:
# df_stat['EvID'][idx] = np.array_str( np.unique(df.id) )
df_stat['Hit'][idx] = df.id.size
rtm = False
df_stat['STID'][idx] = np.unique(df.id)[0]
df_stat['RTID'][idx] = 0.0
if rtm :
#--- missed
missed_idx = df[ df['rt_type'] == self.idx_missed ].index
if missed_idx.size :
df_stat['Missed'][idx] = missed_idx.size
#--- early
toearly_idx = df[ df['rt_type'] == self.idx_toearly ].index
if toearly_idx.size :
df_stat['TOEarly'][idx] = toearly_idx.size
#--- wrong
wrong_idx = df[ df['rt_type'] == self.idx_wrong ].index
if wrong_idx.size:
df_stat['Wrong'][idx] = wrong_idx.size
#--- hit
hit_idx = df[ df['rt_type'] == self.idx_hit ].index
if hit_idx.size:
df_stat['Hit'][idx] = hit_idx.size
df_stat['RTID'][idx] = df['rt_id'][ hit_idx[0] ]
#--- RTmean
df_stat['RTmean'][idx] = df.rt[hit_idx].mean() / sfreq
df_stat['RTmedian'][idx] = df.rt[hit_idx].median() / sfreq
df_stat['RTstd'][idx] = df.rt[hit_idx].std() / sfreq
df_stat['RTmin'][idx] = df.rt[hit_idx].min() / sfreq
df_stat['RTmax'][idx] = df.rt[hit_idx].max() / sfreq
else:
df_stat['STID'][idx] = np.unique(df.id)[0]
df_stat['Hit'][idx] = df.id.size
idx += 1
#--- save stats data frame to csv
fcsv = None
if save:
fcsv = self.HDFobj.filename.replace(self.hdf_postfix,self.stat_postfix)
          #--- float formatting not working due to pandas float_format bug 12.12.14
df_stat.to_csv(fcsv,na_rep='0',float_format='%.3f')
if self.verbose :
print"\n --> Condition Statistic Data Frame\n"
print df_stat
print"\n\n"
if save :
print " --> Stat DataFrame saved as: "+ fcsv
return df_stat
def events_export_events(self,raw=None,fhdf=None,condition_list=None,fif_postfix="evt",
event_extention=".eve",picks=None,reject=None,proj=False,
save_condition={"events":True,"epochs":True,"evoked":True},
time={"time_pre":None,"time_post":None,"baseline":None},
baseline_correction={"type":None,"channel":None,"output":None,"baseline":None},
exclude_events = None,weights=None ):
'''
raw=None,fhdf=None,condition_list=None,fif_postfix="evt",
event_extention=".eve",picks=None,reject=None,proj=False,
save_condition={"events":True,"epochs":True,"evoked":True},
time={"time_pre":None,"time_post":None,"baseline":None},
baseline_correction={"type":None,"channel":None,"output":None,"baseline":None},
exclude_events = None,weights=None
'''
if raw:
self.raw = raw
#--- get HDF obj
if fhdf:
self.HDFobj = pd.HDFStore(fhdf)
else:
if self.raw:
self.HDFobj = self.hdf_obj_open(raw=self.raw)
if not self.hdf_obj_is_open():
assert "ERROR could not open HDF file:\n --> raw: "+self.raw.filename+"\n --> hdf: "+self.HDFObj.filename+"\n"
epocher_condition_list = self.hdf_get_key_list(node='/epocher',key_list=condition_list)
event_id = dict()
time_param = dict()
for k in time:
if time[k]:
time_param[k]= time[k]
bc_param = dict()
for k in baseline_correction:
if baseline_correction[k]:
bc_param[k]= baseline_correction[k]
#--- init exclude_events e.g. eog onset
exclude_events = self.events_update_artifact_time_window(aev=exclude_events)
for condi in epocher_condition_list:
evt = self.events_hdf_to_mne_events(condi,exclude_events=exclude_events,time=time_param,baseline=bc_param,weights=None)
if evt['events'].size:
event_id[condi] = {'id':evt['event_id'],'trials': evt['events'].shape[0],'trials_weighted':0}
#---
ep,bc = self.events_apply_epochs_and_baseline(self.raw,evt=evt,reject=reject,proj=proj,picks=picks)
self.events_save_events(evt=evt,condition=condi,postfix=fif_postfix,picks=picks,reject=reject,proj=proj,save_condition=save_condition)
#--- ck weighted events
# "weights":{"mode":"equal_counts","selection":"median","skipp_first":null},
          if weights and ('mode' in weights) :
print "\n ---> Applying Weighted Export Events"
if weights['mode'] == 'equal':
weights['min_counts'] = event_id[ event_id.keys()[0] ]['trials']
for condi in event_id.keys() :
if event_id[ condi ]['trials'] < weights['min_counts']:
weights['min_counts'] = event_id[ condi ]['trials']
for condi in event_id.keys():
evt = self.events_hdf_to_mne_events(condi,exclude_events=exclude_events,time=time_param,baseline=bc_param,weights=weights)
#---
if evt['events'].size:
ep,bc = self.events_apply_epochs_and_baseline(self.raw,evt=evt,reject=reject,proj=proj,picks=picks)
event_id[condi]['trials_weighted'] = evt['events'].shape[0]
self.events_save_events(evt=evt,condition=condi,postfix=fif_postfix+'W',picks=picks,reject=reject,proj=proj,save_condition=save_condition)
fhdf = self.HDFobj.filename
self.HDFobj.close()
return event_id
def events_save_events(self,evt=None,condition=None,postfix="evt",
picks=None,reject=None,proj=False,
save_condition={"events":True,"epochs":True,"evoked":True}):
from jumeg.preprocbatch.jumeg_preprocbatch_plot import jumeg_preprocbatch_plot as jplt
jplt.verbose = self.verbose
ep,bc = self.events_apply_epochs_and_baseline(self.raw,evt=evt,reject=reject,proj=proj,picks=picks)
postfix += '_' + condition
if bc:
postfix += '_bc'
#--- save events to txt file
if save_condition["events"]:
fname = jumeg_base.get_fif_name(raw=self.raw,postfix=postfix,extention=".eve",update_raw_fname=False)
mne.event.write_events( fname,evt['events'] )
print" ---> done jumeg epocher save events as => EVENTS :" +fname
#--- save epoch data
if save_condition["epochs"]:
fname = jumeg_base.get_fif_name(raw=self.raw,postfix=postfix,extention="-epo.fif",update_raw_fname=False)
ep.save( fname )
print" ---> done jumeg epocher save events as => EPOCHS :" +fname
#--- save averaged data
if save_condition["evoked"]:
fname = jumeg_base.get_fif_name(raw=self.raw,postfix=postfix,extention="-ave.fif",update_raw_fname=False)
mne.write_evokeds( fname,ep.average() )
print" ---> done jumeg epocher save events as => EVOKED (averaged) :" +fname
fname = jumeg_base.get_fif_name(raw=self.raw,postfix=postfix,extention="-ave",update_raw_fname=False)
#--- plot evoked
fname = jplt.plot_evoked(ep,fname=fname,condition=condition,show_plot=False,save_plot=True,plot_dir='plots')
print" ---> done jumeg epocher plot evoked (averaged) :" +fname
def events_hdf_to_mne_events(self,condi,exclude_events=None,time=None,baseline=None,weights=None):
'''
export HDF events to mne events structure
input:
condition name
exclude_events = None
time = None
weights:{"mode":"equal_counts","selection":"median","skipp_first":null,min_counts=None},
return:
events for mne
'''
#-----
events_idx = np.array([],dtype=np.int64)
print " ---> START EPOCHER extract condition : " + condi
ep_key = '/epocher/' + condi
if not( ep_key in self.HDFobj.keys() ): return
#--- get pandas data frame
df = self.hdf_obj_get_dataframe(ep_key)
#--- get stored attributes -> epocher_parameter -> ...
ep_param = self.hdf_obj_get_attributes(key=ep_key,attr='epocher_parameter')
info_param = self.hdf_obj_get_attributes(key=ep_key,attr='info_parameter')
evt = dict()
#--- get channel e.g. 'STI 014' stimulus.events.stim_channel
evt['channel'] = ep_param[ ep_param['marker_channel'] ]['events']['stim_channel']
#--- get output type [onset/offset]
evt['output'] = ep_param[ ep_param['marker_channel'] ]['events']['output']
#--- <onset/offset> for stimulus onset/offset <rt_onset/rt_offset> for response onset/offset
evt['marker_type'] = ep_param['marker_type']
#--- time
evt['time']= self.events_get_parameter(hdf_parameter=ep_param,param=time)
#--- baseline
evt['bc'] = self.events_get_parameter(hdf_parameter=ep_param['baseline'],
param=baseline,key_list=('output','channel','baseline','type'))
#--- ck for artefacts => set bads to -1
if exclude_events :
for kbad in ( exclude_events.keys() ):
ep_bads_cnt0 = df['bads'][ df['bads'] == self.idx_bad ].size
for idx in range( exclude_events[kbad]['tsl'].shape[-1] ) : #df.index :
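              #--- mark the epoch as bad if its marker tsl falls inside the artifact window [tsl0, tsl1]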
df['bads'][ ( exclude_events[kbad]['tsl'][0,idx] < df[evt['output']] ) & ( df[evt['output']] < exclude_events[kbad]['tsl'][1,idx] ) ] = self.idx_bad
ep_bads_cnt1 = df['bads'][df['bads'] == self.idx_bad].size
if self.verbose:
print"\n---> Exclude artefacts " + condi + " : " + kbad
print"---> Tmin: %0.3f Tmax %0.3f" % (exclude_events[kbad]['tmin'],exclude_events[kbad]['tmax'])
print"---> bad epochs : %d" %(ep_bads_cnt0)
print"---> artefact epochs: %d" %(ep_bads_cnt1 - ep_bads_cnt0)
print"---> excluded epochs: %d" %(ep_bads_cnt1)
#if (ep_bads_cnt1 - ep_bads_cnt0) > 0:
# assert "FOUND"
df['selected'] = 0
df['weighted_selected'] = 0
#--- response type idx to process
if ep_param['response_matching'] :
if ep_param['response_matching_type'] is None:
rt_type_idx = self.rt_type_as_index('HIT')
else:
rt_type_idx = self.rt_type_as_index(ep_param['response_matching_type'])
#--- find response type idx
events_idx = df[ evt['marker_type'] ][ (df['rt_type'] == rt_type_idx) & (df['bads'] != self.idx_bad) ].index
if events_idx.size:
#df.loc['selected',events_idx] = 1
df['selected'][events_idx] = 1
# data.loc[data['name'] == 'fred', 'A'] = 0
#--- apply weights to reduce/equalize number of events for all condition
              if weights and ('min_counts' in weights):
if weights['min_counts']:
assert weights['min_counts'] <= events_idx.size,"!!!ERROR minimum required trials greater than number of trials !!!"
w_value = 0.0
if weights['min_counts'] < events_idx.size:
if weights['method']=='median':
w_value = df['rt'][events_idx].median()
elif weights['method']=='mean':
w_value = df['rt'][events_idx].mean()
#else: # random
# w_idx = np.random.shuffle( np.array( events_idx ) )
#--- find minimum from median as index from events_idx => index of index !!!
w_idx = np.argsort( np.abs( np.array( df['rt'][events_idx] - w_value )))
w_events_idx = np.array( events_idx[ w_idx[ 0:weights['min_counts'] ] ] )
w_events_idx.sort()
df['weighted_selected'][w_events_idx] = 1
#df.loc['weighted_selected',w_events_idx] = 1
if self.verbose:
print"RESPONSE MATCHING => Weighted event index => method:" + weights['method']+" => value: %0.3f" % (w_value)
print w_events_idx
print"RT :"
print df['rt'][w_events_idx]
elif weights['min_counts'] == events_idx.size:
df['weighted_selected'][events_idx] = 1
#df.loc['weighted_selected',events_idx] = 1
#--- update new weighted event idx
events_idx = df[ df['weighted_selected'] > 0 ].index
else :
#--- no response matching
events_idx = df[ evt['marker_type'] ][ df['bads'] != self.idx_bad ].index
       #--- ck for equal number of trials over conditions
if events_idx.size:
          if weights and ('min_counts' in weights):
if weights['min_counts']:
assert weights['min_counts'] <= events_idx.size,"!!!ERROR minimum required trials greater than number of trials !!!"
if weights['min_counts'] < events_idx.size:
w_idx = np.array( events_idx.values )
                   np.random.shuffle(w_idx)
w_events_idx = w_idx[0:weights['min_counts'] ]
df['weighted_selected'][w_events_idx] = 1
events_idx = df[ df['weighted_selected'] > 0 ].index
elif weights['min_counts'] == events_idx.size:
df['weighted_selected'][events_idx] = 1
if events_idx.size:
#--- make event array
evt['events'] = np.zeros(( events_idx.size, 3), dtype=np.long)
evt['events'][:,0] = df[ evt['marker_type'] ][events_idx]
if ep_param['marker_channel'] == 'response':
evt['events'][:,2] = ep_param['response']['event_id']
else:
evt['events'][:,2] = df['id'][0]
#--- ck if events exist
evt['event_id'] = int( evt['events'][0,2] )
#--- baseline events
#evt['bc']['output'] = ep_param[ evt['bc']['channel'] ]['events']['output']
evt['bc']['events'] = np.zeros((events_idx.size,3),dtype=np.long)
evt['bc']['events'][:,0] = df[ evt['bc']['output'] ][events_idx]
          if evt['bc']['channel'] == 'response':
evt['bc']['events'][:,2] = ep_param['response']['event_id']
else:
evt['bc']['events'][:,2] = df['id'][0]
evt['bc']['event_id'] = int( evt['bc']['events'][0,2] )
else:
#--- no events
evt['events'] = np.array([])
evt['event_id'] = np.array([])
evt['bc']['events'] = np.array([])
#---update HDF: store df with updated bads & selected & restore user-attribute
storer_attrs = {'epocher_parameter': ep_param,'info_parameter':info_param}
self.hdf_obj_update_dataframe(df,key=ep_key,reset=False,**storer_attrs)
#---
if self.verbose:
print" ---> Export Events from HDF to MNE-Events for condition: " + condi
print" events: %d" % events_idx.size
bads = df[ evt['marker_type'] ][ (df['bads']== self.idx_bad) ]
print" bads : " + str(bads.shape)
print bads
print"\nEvent Info:"
print evt
print"\n\n"
return evt
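    #--- shape of the returned evt dict (descriptive note; values illustrative):
    # evt = {'channel': 'STI 014', 'output': 'onset', 'marker_type': 'onset',
    #        'time': {...}, 'bc': {'events': <n x 3 array>, 'event_id': <int>, ...},
    #        'events': <n x 3 mne event array>, 'event_id': <int>}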
def events_get_parameter(self,hdf_parameter=None,param=None,key_list=('time_pre','time_post') ):
"""
:param hdf_parameter:
        :param param:
        :param key_list: default=('time_pre','time_post')
:return:
param_out
"""
param_out = dict()
for k in key_list:
if param.has_key(k):
param_out[k] = param[k]
elif hdf_parameter.has_key(k) :
if hdf_parameter[k]:
param_out[k] = hdf_parameter[k]
if self.verbose:
print " --> Parameter: "
print param_out
return param_out
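    #--- e.g. (illustrative values):
    # events_get_parameter(hdf_parameter={'time_pre':-0.2,'time_post':0.8}, param={'time_post':0.5})
    # -> {'time_pre': -0.2, 'time_post': 0.5}   explicit entries in <param> override the HDF defaults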
def events_update_artifact_time_window(self,aev=None,tmin=None,tmax=None):
"""
:param aev:
:param tmin:
:param tmax:
:return:
"""
import numpy as np
artifact_events = dict()
       if not aev: return artifact_events
       for kbad in ( aev.keys() ):
node_name = '/ocarta/' + kbad
#--- ck if node exist
try:
self.HDFobj.get(node_name)
except:
continue
artifact_events[kbad]= {'tmin':None,'tmax':None,'tsl':np.array([])}
if tmin:
tsl0= self.raw.time_as_index(tmin)
artifact_events[kbad]['tmin'] = tmin
else:
tsl0= self.raw.time_as_index( aev[kbad]['tmin'] )
artifact_events[kbad]['tmin'] = aev[kbad]['tmin']
if tmax:
tsl1= self.raw.time_as_index(tmax)
artifact_events[kbad]['tmax'] = tmax
else:
tsl1= self.raw.time_as_index(aev[kbad]['tmax'] )
artifact_events[kbad]['tmax'] = aev[kbad]['tmax']
df_bad = self.HDFobj.get(node_name)
artifact_events[kbad]['tsl'] = np.array([ df_bad['onset']+tsl0, df_bad['onset']+tsl1 ] )
# aev[0,ixd] ->tsl0 aev[1,idx] -> tsl1
#artifact_events[kbad]['tsl'] = np.zeros( shape =( df_bad['onset'].size,2) )
#artifact_events[kbad]['tsl'][:,0] = df_bad['onset'] +tsl0
#artifact_events[kbad]['tsl'][:,1] = df_bad['onset'] +tsl1
return artifact_events
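    #--- note: artifact_events[<kbad>]['tsl'] has shape (2, n_events):
    #    row 0 = artifact-window starts (onset + tmin in tsl), row 1 = window ends (onset + tmax in tsl)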
def events_apply_epochs_and_baseline(self,raw,evt=None,reject=None,proj=None,picks=None):
'''
generate epochs from raw and apply baseline correction
input:
raw obj
evt=event dict
check for bad epochs due to short baseline onset/offset intervall and drop them
output:
baseline corrected epoch data
!!! exclude trigger channels: stim and resp !!!
bc correction: true /false
'''
ep_bads = None
ep_bc_mean = None
bc = False
#--- get epochs no bc correction
ep = mne.Epochs(self.raw,evt['events'],evt['event_id'],evt['time']['time_pre'],evt['time']['time_post'],
baseline=None,picks=picks,reject=reject,proj=proj,preload=True,verbose=False) # self.verbose)
if ('bc' in evt.keys() ):
if evt['bc']['baseline']:
if len( evt['bc']['events'] ):
if evt['bc']['baseline'][0]:
bc_time_pre = evt['bc']['baseline'][0]
else:
bc_time_pre = evt['time']['time_pre']
if evt['bc']['baseline'][1]:
bc_time_post = evt['bc']['baseline'][1]
else:
bc_time_post = evt['time']['time_post']
picks_bc = jumeg_base.picks.exclude_trigger(ep)
#--- create baseline epochs -> from stimulus baseline
ep_bc = mne.Epochs(self.raw,evt['bc']['events'],evt['bc']['event_id'],
bc_time_pre,bc_time_post,baseline=None,
picks=picks_bc,reject=reject,proj=proj,preload=True,verbose=self.verbose)
               #--- check for equal epoch size epoch_baseline vs epoch for np broadcasting
ep_goods = np.intersect1d(ep_bc.selection,ep.selection)
#--- bad epochs & drop them
ep_bads = np.array( np.where(np.in1d(ep_bc.selection,ep_goods,invert=True)) )
               if ep_bads.size:
ep_bc.drop(ep_bads.flatten())
#--- calc mean from bc epochs
ep_bc_mean = np.mean(ep_bc._data, axis = -1)
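               #--- subtract the per-epoch / per-channel baseline mean from every time sample (broadcast over the time axis)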
ep._data[:,picks_bc,:] -= ep_bc_mean[:,:,np.newaxis]
bc = True
#---
if self.verbose:
print" ---> Epocher apply epoch and baseline -> mne epochs:"
print ep
print" id: %d <pre time>: %0.3f <post time>: %0.3f" % (evt['event_id'],evt['time']['time_pre'],evt['time']['time_post'])
print" --> baseline correction : %r" %(bc)
if bc:
print" done -> baseline correction"
print" bc id: %d <pre time>: %0.3f <post time>: %0.3f" % (evt['bc']['event_id'],bc_time_pre,bc_time_post)
print"\n --> Epoch selection: "
print ep.selection
print" --> Baseline epoch selection: "
print ep_bc.selection
print"\n --> good epochs selected:"
print ep_goods
print"\n --> bad epochs & drop them:"
print ep_bads
print"\n"
return ep,bc
jumeg_epocher_events = JuMEG_Epocher_Events()
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e153.py | 2 | 6519 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd LSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using LSTM not BLSTM
e151
* Max pooling
"""
def set_subsample_target(net, epoch):
net.source.subsample_target = 4
net.source.input_padding = 5
net.generate_validation_data_and_set_shapes()
def exp_a(name):
# skip_prob = 0.7
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=2000,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=25,
# include_diff=True,
input_padding=3,
subsample_target=4
)
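    # descriptive note: the source above draws 2000-sample sequences for 5 UK-DALE appliances from
    # building 1 with a skip probability of 0.7, and downsamples the target by a factor of 4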
net = Net(
experiment_name=name,
source=source,
save_plot_interval=1000,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=.1, clip_range=(-1, 1)),
layers_config=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 2,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(25)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1 # pool over the time axis
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 2,
'stride': 1,
'nonlinearity': sigmoid,
'W': Uniform(1)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1 # pool over the time axis
},
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
| mit |
pompiduskus/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
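    # quick worked check of the equivalence: n_features=11, n_features_to_select=3, step=2
    # formula1 = 1 + ((11 + 2 - 3 - 1) // 2) = 1 + 4 = 5 ; formula2 = 1 + ceil((11 - 3) / 2) = 1 + 4 = 5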
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
heli522/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
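# minimal usage sketch for LinearSVC (illustrative only; X_train, y_train, X_test are assumed given arrays):
# >>> from sklearn.svm import LinearSVC
# >>> clf = LinearSVC(C=1.0).fit(X_train, y_train)
# >>> clf.predict(X_test)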
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard SVR
        loss (the L1 epsilon-insensitive loss) while 'squared_epsilon_insensitive'
        is the squared epsilon-insensitive loss (L2).
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for further comparison.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
eblossom/gnuradio | gr-digital/examples/berawgn.py | 32 | 4886 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
BER simulation for QPSK signals, compare to theoretical values.
Change the N_BITS value to simulate more bits per Eb/N0 value,
thus allowing to check for lower BER values.
Lower values will work faster, higher values will use a lot of RAM.
Also, this app isn't highly optimized--the flow graph is completely
reinstantiated for every Eb/N0 value.
Of course, expect the lowest BER you can resolve reliably to be about one
order of magnitude above 1/N_BITS (roughly 1e-6 for the default N_BITS = 1e7).
"""
import math
import numpy
from gnuradio import gr, digital
from gnuradio import analog
from gnuradio import blocks
import sys
try:
from scipy.special import erfc
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
# Best to choose powers of 10
N_BITS = 1e7
RAND_SEED = 42
def berawgn(EbN0):
""" Calculates theoretical bit error rate in AWGN (for BPSK and given Eb/N0) """
return 0.5 * erfc(math.sqrt(10**(float(EbN0)/10)))
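# The expression above is the standard closed-form AWGN result, with Eb/N0
# given in dB:
#
#     Pb = 0.5 * erfc( sqrt( 10**(EbN0_dB / 10) ) )
#
# Gray-mapped QPSK has the same per-bit error probability as BPSK, which is
# why the BPSK formula can serve as the theoretical reference for this QPSK
# simulation (assuming the constellation used below is Gray-coded).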
class BitErrors(gr.hier_block2):
""" Two inputs: true and received bits. We compare them and
add up the number of incorrect bits. Because integrate_ff()
can only add up a certain number of values, the output is
not a scalar, but a sequence of values, the sum of which is
the BER. """
def __init__(self, bits_per_byte):
gr.hier_block2.__init__(self, "BitErrors",
gr.io_signature(2, 2, gr.sizeof_char),
gr.io_signature(1, 1, gr.sizeof_int))
# Bit comparison
comp = blocks.xor_bb()
intdump_decim = 100000
if N_BITS < intdump_decim:
intdump_decim = int(N_BITS)
self.connect(self,
comp,
blocks.unpack_k_bits_bb(bits_per_byte),
blocks.uchar_to_float(),
blocks.integrate_ff(intdump_decim),
blocks.multiply_const_ff(1.0/N_BITS),
self)
self.connect((self, 1), (comp, 1))
class BERAWGNSimu(gr.top_block):
" This contains the simulation flow graph "
def __init__(self, EbN0):
gr.top_block.__init__(self)
self.const = digital.qpsk_constellation()
# Source is N_BITS bits, non-repeated
data = map(int, numpy.random.randint(0, self.const.arity(), N_BITS/self.const.bits_per_symbol()))
src = blocks.vector_source_b(data, False)
mod = digital.chunks_to_symbols_bc((self.const.points()), 1)
add = blocks.add_vcc()
noise = analog.noise_source_c(analog.GR_GAUSSIAN,
self.EbN0_to_noise_voltage(EbN0),
RAND_SEED)
demod = digital.constellation_decoder_cb(self.const.base())
ber = BitErrors(self.const.bits_per_symbol())
self.sink = blocks.vector_sink_f()
self.connect(src, mod, add, demod, ber, self.sink)
self.connect(noise, (add, 1))
self.connect(src, (ber, 1))
def EbN0_to_noise_voltage(self, EbN0):
""" Converts Eb/N0 to a complex noise voltage (assuming unit symbol power) """
return 1.0 / math.sqrt(self.const.bits_per_symbol() * 10**(float(EbN0)/10))
def simulate_ber(EbN0):
""" All the work's done here: create flow graph, run, read out BER """
print "Eb/N0 = %d dB" % EbN0
fg = BERAWGNSimu(EbN0)
fg.run()
return numpy.sum(fg.sink.data())
if __name__ == "__main__":
EbN0_min = 0
EbN0_max = 15
EbN0_range = range(EbN0_min, EbN0_max+1)
ber_theory = [berawgn(x) for x in EbN0_range]
print "Simulating..."
ber_simu = [simulate_ber(x) for x in EbN0_range]
f = pylab.figure()
s = f.add_subplot(1,1,1)
s.semilogy(EbN0_range, ber_theory, 'g-.', label="Theoretical")
s.semilogy(EbN0_range, ber_simu, 'b-o', label="Simulated")
s.set_title('BER Simulation')
s.set_xlabel('Eb/N0 (dB)')
s.set_ylabel('BER')
s.legend()
s.grid()
pylab.show()
| gpl-3.0 |
vermouthmjl/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 14 | 2001 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
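# One possible way to fill in the TASKs above (a sketch, not necessarily the
# reference solution shipped with scikit-learn): a character n-gram TF-IDF
# vectorizer chained with a Perceptron classifier. The step names 'vec' and
# 'clf' are arbitrary choices made here.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)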
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
hpfem/agros2d | resources/test/test_suite/particle_tracing/pairparticle_tracing.py | 1 | 5576 | #!/usr/bin/python
import pylab as pl
import matplotlib.cm as cm
import json
import time
from visual import *
from math import pi, sqrt
pl.rcParams['figure.figsize'] = 10, 10
pl.rcParams['font.size'] = 11
pl.rcParams['legend.fontsize'] = 9
pl.rcParams['text.usetex'] = True
pl.rcParams['text.latex.preamble'] = "\usepackage{mathptmx}"
pl.rcParams['axes.grid'] = False
def save_data(data, variant):
file = open('{0}.dat'.format(variant), 'w')
json.dump(data, file)
file.close()
def load_data(variant):
file = open('{0}.dat'.format(variant), 'r')
data = json.load(file)
file.close()
return data
def compare_data(reference_data, data, coordinate = 'x'):
assert(len(reference_data) == len(data))
difference = 0
for i in range(len(data)):
for j in range(len(data[i][coordinate])):
pass
def plot_data(data, variant):
pl.figure()
pair = True
color = None
for i in range(len(data)):
if pair:
color = cm.jet(i/float(len(data)), 1)
pl.plot(data[i]['x'], data[i]['y'], '-', color=color)
pair = False
else:
pl.plot(data[i]['x'], data[i]['y'], '--', color=color)
pair = True
pl.xlabel("$x\,\mathrm{(m)}$")
pl.ylabel("$y\,\mathrm{(m)}$")
pl.savefig('{0}-trajectory.pdf'.format(variant), bbox_inches='tight')
class MultiParticleTest(object):
def __init__(self, variant = 'test', visualization = False,
gravity_force = True, electrostatic_force = False,
particle2particle_electric_force = True):
scene.visible = visualization
""" test properties """
self.variant = variant
self.d = 1
self.h = 1
self.x0 = 0.001
self.y0 = 0.75
self.v0 = 0.25
self.U = 0
self.dx = 0.003
self.dy = 0.025
self.time_step = 1e-6
self.rate = 1e6
self.time = [0]
self.gravity_force = gravity_force
self.electrostatic_force = electrostatic_force
self.particle2particle_electric_force = particle2particle_electric_force
""" particles """
self.particles = []
self.particles.append(sphere(radius = 0.02, pos=(self.x0, self.y0, 0), color= (0.75, 0, 0),
track = curve(radius = 0.01, color=(0.75, 0, 0))))
self.particles[0].charge = -5e-10
self.particles[0].vel = vector(0, -self.v0, 0)
self.particles.append(sphere(radius = 0.02, pos=(self.x0-self.dx, self.y0-self.dy, 0), color= (0, 0, 0.75),
track = curve(radius = 0.01, color=(0, 0, 0.75))))
self.particles[1].charge = 1e-10
self.particles[1].vel = vector(0, 0, 0)
for particle in self.particles:
particle.run = True
particle.velocity = [[], [], []]
particle.particle2particle_electric_force = []
particle.mass = 3.5e-5
def force(self, particle1, particle2):
Fg = vector(0, 0, 0)
Fe = vector(0, 0, 0)
Fp2p_e = vector(0, 0, 0)
""" electrostatic force """
if self.electrostatic_force:
Fe.x = particle1.charge * self.U/(self.d/2.0 + particle1.pos.x)
distance = sqrt((particle1.pos.x - particle2.pos.x)**2 +
(particle1.pos.y - particle2.pos.y)**2 +
(particle1.pos.z - particle2.pos.z)**2)
""" particle to particle electric force """
if (self.particle2particle_electric_force and distance > 0):
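# Coulomb's law in vector form: F_vec = q1*q2*r_vec/(4*pi*eps0*|r|**3).
# The cube of the distance in the denominator below absorbs the
# normalization of the coordinate differences into a unit direction vector.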
F = (particle1.charge * particle2.charge) / (4.0 * pi * 8.854e-12 * distance**3)
Fp2p_e.x = F * (particle1.pos.x - particle2.pos.x)
Fp2p_e.y = F * (particle1.pos.y - particle2.pos.y)
Fp2p_e.z = F * (particle1.pos.z - particle2.pos.z)
particle1.particle2particle_electric_force.append(sqrt(Fp2p_e.x**2 + Fp2p_e.y**2 + Fp2p_e.z**2))
""" gravity force """
if self.gravity_force:
Fg.y = - particle1.mass * 9.823
F = vector(Fp2p_e.x + Fe.x,
Fg.y + Fe.y,
Fe.z)
return F
def solve(self):
start_time = time.time()
run = True
while(run):
rate(self.rate)
self.time.append(self.time[-1] + self.time_step)
for particle in self.particles:
if not particle.run:
continue
particle.pos = particle.pos + particle.vel * self.time_step
particle.vel = particle.vel + self.force(particle, self.particles[self.particles.index(particle) - 1]) / particle.mass * self.time_step
particle.velocity[0].append(particle.vel.x)
particle.velocity[1].append(particle.vel.y)
particle.velocity[2].append(particle.vel.z)
particle.track.append(pos = particle.pos)
if (particle.pos.y < 0):
particle.run = False
if ((not self.particles[self.particles.index(particle) - 1].run) and
(not particle.run)):
run = False
self.elapsed_time = time.time()-start_time
self.save()
def save(self):
data = []
N = 1
for particle in self.particles:
data.append({'x' : list(particle.track.x)[::N], 'y' : list(particle.track.y)[::N], 'z' : list(particle.track.z)[::N],
'vx' : particle.velocity[0][::N], 'vy' : particle.velocity[1][::N], 'vz' : particle.velocity[2][::N]})
data.append({'te' : self.elapsed_time})
save_data(data, self.variant)
def convergence():
for step in [1e-7]: #1e-2, 1e-3, 1e-4, 1e-5, 1e-6
variant = 'convergence-euler-{0}'.format(step)
test = MultiParticleTest(variant = variant)
test.time_step = step
test.solve()
def free_fall():
test = MultiParticleTest(variant = 'free_fall')
test.solve()
if __name__ == '__main__':
convergence()
#free_fall()
| gpl-2.0 |
edarin/population_simulator | src/tools_erfs.py | 1 | 1885 | import pandas as pd
import numpy as np
import json
def open_json(file):
try:
with open(file, 'r') as fp:
return json.load(fp)
except Exception as e:
print('Failed to load from graph ' + repr(e))
return {}
def get_individu(table):
'''
Table utilisee : output/erfs_fpr_2012.h5
Fait les merge necessaire pour creer table individu
'''
erfs = pd.HDFStore(table)
emploi_ind = erfs['/fpr_irf12e12t4']
revenu_ind = erfs['/fpr_indiv_2012_retropole']
individu = emploi_ind.merge(revenu_ind, on= ['noindiv', 'noi', 'ident12'], how='inner')
individu = individu[(individu.naia != 1854) & (individu.naia != 0)]
return individu
def get_individu_simplified(table):
'''
Select the columns of interest
'''
individu = table[['acteu6', 'naia', 'chpub', 'matri', 'dip', 'age', 'nbenf18', 'nbenf18m',
'nbenf3', 'nbenf6','noi', 'retrai', 'revent','noindiv',
'sexe', 'statut', 'typmen15', 'rag_i', 'ric_i', 'rnc_i', 'chomage_i',
'pens_alim_recue_i', 'retraites_i', 'salaires_i', 'wprm', 'maahe']]
return individu
def get_menage(table):
'''
Table used: output/erfs_fpr_2012.h5
Performs the merges needed to build the household-level table
'''
erfs = pd.HDFStore(table)
emploi_men = erfs['/fpr_mrf12e12t4']
revenu_men = erfs['/fpr_menage_2012_retropole']
menage = revenu_men.merge(emploi_men, on= ['ident12'])
assert menage.isnull().values.any() == False
return menage
def get_salaire(table, groupby):
'''
Retrieve salaries grouped by the given key
'''
salaire = table.groupby(groupby).salaires_i
return salaire
def get_weights(table, groupby):
'''
Retrieve the weights grouped by the given key
'''
weights = table.groupby(groupby).wprm
return weights
| agpl-3.0 |
ibab/tensorflow | tensorflow/contrib/learn/python/learn/tests/test_saver.py | 3 | 3422 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
class SaverTest(tf.test.TestCase):
def testIris(self):
path = tf.test.get_temp_dir() + '/tmp.saver'
random.seed(42)
iris = datasets.load_iris()
classifier = learn.TensorFlowLinearClassifier(n_classes=3)
classifier.fit(iris.data, iris.target)
classifier.save(path)
new_classifier = learn.TensorFlowEstimator.restore(path)
self.assertEqual(type(new_classifier), type(classifier))
score = accuracy_score(iris.target, new_classifier.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
def testCustomModel(self):
path = tf.test.get_temp_dir() + '/tmp.saver2'
random.seed(42)
iris = datasets.load_iris()
def custom_model(X, y):
return learn.models.logistic_regression(X, y)
classifier = learn.TensorFlowEstimator(model_fn=custom_model, n_classes=3)
classifier.fit(iris.data, iris.target)
classifier.save(path)
new_classifier = learn.TensorFlowEstimator.restore(path)
self.assertEqual(type(new_classifier), type(classifier))
score = accuracy_score(iris.target, new_classifier.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
def testDNN(self):
path = tf.test.get_temp_dir() + '/tmp_saver3'
random.seed(42)
iris = datasets.load_iris()
classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3)
classifier.fit(iris.data, iris.target)
classifier.save(path)
new_classifier = learn.TensorFlowEstimator.restore(path)
self.assertEqual(type(new_classifier), type(classifier))
score = accuracy_score(iris.target, new_classifier.predict(iris.data))
self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))
def testNoFolder(self):
with self.assertRaises(ValueError):
learn.TensorFlowEstimator.restore('no_model_path')
def testNoCheckpoints(self):
path = tf.test.get_temp_dir() + '/tmp/tmp.saver4'
random.seed(42)
iris = datasets.load_iris()
classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3)
classifier.fit(iris.data, iris.target)
classifier.save(path)
os.remove(os.path.join(path, 'checkpoint'))
with self.assertRaises(ValueError):
learn.TensorFlowEstimator.restore(path)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
sirallen/nic-structure | py/_geolocator.py | 1 | 2989 | import pandas as pd
import numpy as np
import os, googlemaps, pickle, re
import argparse
argparser = argparse.ArgumentParser(description='Geolocate entities in NIC Organization Hierarchies.')
argparser.add_argument('-r', '--rssd', nargs='+', help='list of rssds to geolocate')
argparser.add_argument('-f', '--files', nargs='+', help='list of files to geolocate')
argparser.add_argument('-a', '--after', help='select files after a date yyyymmdd (inclusive)')
argparser.add_argument('-b', '--before', help='select files before a date yyyymmdd (exclusive)')
args = argparser.parse_args()
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
api_key = open('GOOGLEMAPS_API_KEY').read()
gmaps = googlemaps.Client(key=api_key)
# LocationMaster -- dictionary used to store geodata; if an entity's location has
# already been geolocated, just pull the information from here; if it hasn't,
# then add the geodata retrieved from Google Maps
if os.path.isfile('../data/app/LocationMaster'):
master = pickle.load(open('../data/app/LocationMaster', 'rb+'))
else:
master = dict()
readfiles = [os.path.join('../data/txt', f) for f in os.listdir('../data/txt')]
if args.rssd:
readfiles = filter(lambda x: re.search('\\d+', x).group() in args.rssd, readfiles)
if args.before:
readfiles = filter(lambda x: re.search('(?<=-)\\d+', x).group() < args.before, readfiles)
if args.after:
readfiles = filter(lambda x: re.search('(?<=-)\\d+', x).group() >= args.after, readfiles)
if args.files:
readfiles = args.files
for readfile in readfiles:
print('Reading ', readfile)
# http://stackoverflow.com/questions/17092671
df = pd.read_csv(readfile, dtype={'Parent': object})
df['label'] = np.nan
df['lat'] = np.nan
df['lng'] = np.nan
uniqLoc = df['Loc'].unique()
for u in uniqLoc:
if u not in master:
print('Requesting ' + u)
result = gmaps.geocode(u)
if result:
coord = result[0]['geometry']['location']
addr = result[0]['formatted_address']
addr = addr.replace(', USA', '')
addr = re.sub('([^,]*,).*, (.*)', '\\1 \\2', addr)
addr = re.sub('[0-9]', '', addr).strip()
#print('\t\tFound ' + addr)
master[u] = dict()
master[u]['label'] = addr
master[u]['lat'] = float(np.round(coord['lat'], 7))
master[u]['lng'] = float(np.round(coord['lng'], 7))
else:
#print('\t\t*no result')
print(u + ' returned result of length zero')
continue
df.loc[df.Loc == u,'label'] = master[u]['label']
df.loc[df.Loc == u,'lat'] = master[u]['lat']
df.loc[df.Loc == u,'lng'] = master[u]['lng']
# Save
df.to_csv(readfile, index=False, encoding='utf-8')
#df.to_json(readfile.replace('txt', 'json'), orient='records')
pickle.dump(master, open('../data/app/LocationMaster', 'wb+'))
pd.DataFrame.from_dict(master, orient='index').to_csv('../data/LocationMaster.csv')
| mit |
zingale/pyro2 | multigrid/general_MG.py | 2 | 9053 | r"""
This multigrid solver is build from multigrid/MG.py
and implements a more general solver for an equation of the form
.. math::
\alpha \phi + \nabla\cdot { \beta \nabla \phi } + \gamma \cdot \nabla \phi = f
where alpha, beta, and gamma are defined on the same grid as phi.
These should all come in as cell-centered quantities. The solver
will put beta on edges. Note that gamma is a vector here, with
x- and y-components.
A cell-centered discretization for phi is used throughout.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import multigrid.edge_coeffs as ec
import multigrid.MG as MG
np.set_printoptions(precision=3, linewidth=128)
class GeneralMG2d(MG.CellCenterMG2d):
r"""
this is a multigrid solver that supports our general elliptic
equation.
we need to accept a coefficient ``CellCenterData2d`` object with
fields defined for ``alpha``, ``beta``, ``gamma_x``, and ``gamma_y`` on the
fine level.
We then restrict this data through the MG hierarchy (and
average beta to the edges).
we need new ``compute_residual()`` and ``smooth()`` functions that
understand these coefficients.
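A rough usage sketch -- the driver methods named here (``init_zeros()``,
``init_RHS()``, ``solve()``, ``get_solution()``) are assumed to be the
ones inherited from ``MG.CellCenterMG2d``, and ``coeffs`` is a
``CellCenterData2d`` object with ``alpha``, ``beta``, ``gamma_x`` and
``gamma_y`` already filled on the fine grid::
    mg = GeneralMG2d(nx, ny, coeffs=coeffs)
    mg.init_zeros()          # initial guess for phi
    mg.init_RHS(f)           # right-hand side on the same nx x ny grid
    mg.solve(rtol=1.e-10)
    phi = mg.get_solution()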
"""
def __init__(self, nx, ny, xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0,
xl_BC_type="dirichlet", xr_BC_type="dirichlet",
yl_BC_type="dirichlet", yr_BC_type="dirichlet",
xl_BC=None, xr_BC=None,
yl_BC=None, yr_BC=None,
nsmooth=10, nsmooth_bottom=50,
verbose=0,
coeffs=None,
true_function=None, vis=0, vis_title=""):
"""
here, coeffs is a CCData2d object
"""
# we'll keep a list of the beta coefficients averaged to the
# interfaces on each level -- note: these will already be
# scaled by 1/dx**2
self.beta_edge = []
# initialize the MG object with the auxiliary fields
MG.CellCenterMG2d.__init__(self, nx, ny, ng=1,
xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax,
xl_BC_type=xl_BC_type, xr_BC_type=xr_BC_type,
yl_BC_type=yl_BC_type, yr_BC_type=yr_BC_type,
xl_BC=xl_BC, xr_BC=xr_BC,
yl_BC=yl_BC, yr_BC=yr_BC,
alpha=0.0, beta=0.0,
nsmooth=nsmooth, nsmooth_bottom=nsmooth_bottom,
verbose=verbose,
aux_field=["alpha", "beta", "gamma_x", "gamma_y"],
aux_bc=[coeffs.BCs["alpha"], coeffs.BCs["beta"],
coeffs.BCs["gamma_x"], coeffs.BCs["gamma_y"]],
true_function=true_function, vis=vis,
vis_title=vis_title)
# the coefficients come in through the coeffs object. Set the coefficients
# and restrict them down the hierarchy -- we only need to do this
# once. We need to hold the original coeffs in our grid so we
# can do a ghost cell fill.
for c in ["alpha", "beta", "gamma_x", "gamma_y"]:
v = self.grids[self.nlevels-1].get_var(c)
v.v()[:, :] = coeffs.get_var(c).v()
self.grids[self.nlevels-1].fill_BC(c)
n = self.nlevels-2
while n >= 0:
f_patch = self.grids[n+1]
c_patch = self.grids[n]
coeffs_c = c_patch.get_var(c)
coeffs_c.v()[:, :] = f_patch.restrict(c).v()
self.grids[n].fill_BC(c)
n -= 1
# put the beta coefficients on edges
beta = self.grids[self.nlevels-1].get_var("beta")
self.beta_edge.insert(0, ec.EdgeCoeffs(self.grids[self.nlevels-1].grid, beta))
n = self.nlevels-2
while n >= 0:
self.beta_edge.insert(0, self.beta_edge[0].restrict())
n -= 1
def smooth(self, level, nsmooth):
"""
Use red-black Gauss-Seidel iterations to smooth the solution
at a given level. This is used at each stage of the V-cycle
(up and down) in the MG solution, but it can also be called
directly to solve the elliptic problem (although it will take
many more iterations).
Parameters
----------
level : int
The level in the MG hierarchy to smooth the solution
nsmooth : int
The number of r-b Gauss-Seidel smoothing iterations to perform
"""
v = self.grids[level].get_var("v")
f = self.grids[level].get_var("f")
myg = self.grids[level].grid
dx = myg.dx
dy = myg.dy
self.grids[level].fill_BC("v")
alpha = self.grids[level].get_var("alpha")
gamma_x = 0.5*self.grids[level].get_var("gamma_x")/dx
gamma_y = 0.5*self.grids[level].get_var("gamma_y")/dy
# these are already scaled by 1/dx**2 in the EdgeCoeffs
# construction
beta_x = self.beta_edge[level].x
beta_y = self.beta_edge[level].y
# do red-black G-S
for i in range(nsmooth):
# do the red black updating in four decoupled groups
#
#
# | | |
# --+-------+-------+--
# | | |
# | 4 | 3 |
# | | |
# --+-------+-------+--
# | | |
# jlo | 1 | 2 |
# | | |
# --+-------+-------+--
# | ilo | |
#
# groups 1 and 3 are done together, then we need to
# fill ghost cells, and then groups 2 and 4
for n, (ix, iy) in enumerate([(0, 0), (1, 1), (1, 0), (0, 1)]):
denom = (
alpha.ip_jp(ix, iy, s=2) -
beta_x.ip_jp(1+ix, iy, s=2) - beta_x.ip_jp(ix, iy, s=2) -
beta_y.ip_jp(ix, 1+iy, s=2) - beta_y.ip_jp(ix, iy, s=2))
v.ip_jp(ix, iy, s=2)[:, :] = (f.ip_jp(ix, iy, s=2) -
# (beta_{i+1/2,j} + gamma^x_{i,j}) phi_{i+1,j}
(beta_x.ip_jp(1+ix, iy, s=2) + gamma_x.ip_jp(ix, iy, s=2)) * v.ip_jp(1+ix, iy, s=2) -
# (beta_{i-1/2,j} - gamma^x_{i,j}) phi_{i-1,j}
(beta_x.ip_jp(ix, iy, s=2) - gamma_x.ip_jp(ix, iy, s=2)) * v.ip_jp(-1+ix, iy, s=2) -
# (beta_{i,j+1/2} + gamma^y_{i,j}) phi_{i,j+1}
(beta_y.ip_jp(ix, 1+iy, s=2) + gamma_y.ip_jp(ix, iy, s=2)) * v.ip_jp(ix, 1+iy, s=2) -
# (beta_{i,j-1/2} - gamma^y_{i,j}) phi_{i,j-1}
(beta_y.ip_jp(ix, iy, s=2) - gamma_y.ip_jp(ix, iy, s=2)) * v.ip_jp(ix, -1+iy, s=2)) / denom
if n == 1 or n == 3:
self.grids[level].fill_BC("v")
if self.vis == 1:
plt.clf()
plt.subplot(221)
self._draw_solution()
plt.subplot(222)
self._draw_V()
plt.subplot(223)
self._draw_main_solution()
plt.subplot(224)
self._draw_main_error()
plt.suptitle(self.vis_title, fontsize=18)
plt.draw()
plt.savefig("mg_%4.4d.png" % (self.frame))
self.frame += 1
def _compute_residual(self, level):
""" compute the residual and store it in the r variable"""
v = self.grids[level].get_var("v")
f = self.grids[level].get_var("f")
r = self.grids[level].get_var("r")
myg = self.grids[level].grid
dx = myg.dx
dy = myg.dy
alpha = self.grids[level].get_var("alpha")
gamma_x = 0.5*self.grids[level].get_var("gamma_x")/dx
gamma_y = 0.5*self.grids[level].get_var("gamma_y")/dy
# these already have a 1/dx**2 scaling in them
beta_x = self.beta_edge[level].x
beta_y = self.beta_edge[level].y
# compute the residual
# r = f - L_eta phi
L_eta_phi = (
# alpha_{i,j} phi_{i,j}
alpha.v()*v.v() +
# beta_{i+1/2,j} (phi_{i+1,j} - phi_{i,j})
beta_x.ip(1)*(v.ip(1) - v.v()) - \
# beta_{i-1/2,j} (phi_{i,j} - phi_{i-1,j})
beta_x.v()*(v.v() - v.ip(-1)) + \
# beta_{i,j+1/2} (phi_{i,j+1} - phi_{i,j})
beta_y.jp(1)*(v.jp(1) - v.v()) - \
# beta_{i,j-1/2} (phi_{i,j} - phi_{i,j-1})
beta_y.v()*(v.v() - v.jp(-1)) + \
# gamma^x_{i,j} (phi_{i+1,j} - phi_{i-1,j})
gamma_x.v()*(v.ip(1) - v.ip(-1)) + \
# gamma^y_{i,j} (phi_{i,j+1} - phi_{i,j-1})
gamma_y.v()*(v.jp(1) - v.jp(-1)))
r.v()[:, :] = f.v() - L_eta_phi
| bsd-3-clause |
dsullivan7/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
mblondel/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured with the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required; therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iterations to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
dongsenfo/pymatgen | pymatgen/analysis/chemenv/coordination_environments/structure_environments.py | 3 | 89701 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains objects that are used to describe the environments in a structure. The most detailed object
(StructureEnvironments) contains a very thorough analysis of the environments of a given atom but is difficult to
used as such. The LightStructureEnvironments object is a lighter version that is obtained by applying a "strategy"
on the StructureEnvironments object. Basically, the LightStructureEnvironments provides the coordination environment(s)
and possibly some fraction corresponding to these.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "[email protected]"
__date__ = "Feb 20, 2016"
import numpy as np
from collections import OrderedDict
from pymatgen.core.sites import PeriodicSite
from monty.json import MSONable, MontyDecoder
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.core.structure import Structure
from monty.json import jsanitize
from pymatgen.analysis.chemenv.coordination_environments.voronoi import DetailedVoronoiContainer
from pymatgen.analysis.chemenv.utils.chemenv_errors import ChemenvError
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.utils.defs_utils import AdditionalConditions
allcg = AllCoordinationGeometries()
symbol_cn_mapping = allcg.get_symbol_cn_mapping()
class StructureEnvironments(MSONable):
"""
Class used to store the chemical environments of a given structure.
"""
AC = AdditionalConditions()
class NeighborsSet():
"""
Class used to store a given set of neighbors of a given site (based on the detailed_voronoi).
"""
def __init__(self, structure, isite, detailed_voronoi, site_voronoi_indices, sources=None):
self.structure = structure
self.isite = isite
self.detailed_voronoi = detailed_voronoi
self.voronoi = detailed_voronoi.voronoi_list2[isite]
myset = set(site_voronoi_indices)
if len(myset) != len(site_voronoi_indices):
raise ValueError('Set of neighbors contains duplicates !')
self.site_voronoi_indices = sorted(myset)
if sources is None:
self.sources = [{'origin': 'UNKNOWN'}]
elif isinstance(sources, list):
self.sources = sources
else:
self.sources = [sources]
def get_neighb_voronoi_indices(self, permutation):
return [self.site_voronoi_indices[ii] for ii in permutation]
@property
def neighb_coords(self):
return [self.voronoi[inb]['site'].coords for inb in self.site_voronoi_indices]
@property
def neighb_coordsOpt(self):
return self.detailed_voronoi.voronoi_list_coords[self.isite].take(self.site_voronoi_indices, axis=0)
@property
def neighb_sites(self):
return [self.voronoi[inb]['site'] for inb in self.site_voronoi_indices]
@property
def neighb_sites_and_indices(self):
return [{'site': self.voronoi[inb]['site'],
'index': self.voronoi[inb]['index']} for inb in self.site_voronoi_indices]
@property
def coords(self):
coords = [self.structure[self.isite].coords]
coords.extend(self.neighb_coords)
return coords
@property
def normalized_distances(self):
return [self.voronoi[inb]['normalized_distance'] for inb in self.site_voronoi_indices]
@property
def normalized_angles(self):
return [self.voronoi[inb]['normalized_angle'] for inb in self.site_voronoi_indices]
@property
def distances(self):
return [self.voronoi[inb]['distance'] for inb in self.site_voronoi_indices]
@property
def angles(self):
return [self.voronoi[inb]['angle'] for inb in self.site_voronoi_indices]
@property
def sphere_fraction_angles(self):
return [0.25*self.voronoi[inb]['angle']/np.pi for inb in self.site_voronoi_indices]
@property
def info(self):
was = self.normalized_angles
wds = self.normalized_distances
angles = self.angles
distances = self.distances
return {'normalized_angles': was,
'normalized_distances': wds,
'normalized_angles_sum': np.sum(was),
'normalized_angles_mean': np.mean(was),
'normalized_angles_std': np.std(was),
'normalized_angles_min': np.min(was),
'normalized_angles_max': np.max(was),
'normalized_distances_mean': np.mean(wds),
'normalized_distances_std': np.std(wds),
'normalized_distances_min': np.min(wds),
'normalized_distances_max': np.max(wds),
'angles': angles,
'distances': distances,
'angles_sum': np.sum(angles),
'angles_mean': np.mean(angles),
'angles_std': np.std(angles),
'angles_min': np.min(angles),
'angles_max': np.max(angles),
'distances_mean': np.mean(distances),
'distances_std': np.std(distances),
'distances_min': np.min(distances),
'distances_max': np.max(distances)
}
def distance_plateau(self):
all_nbs_normalized_distances_sorted = sorted([nb['normalized_distance'] for nb in self.voronoi],
reverse=True)
maxdist = np.max(self.normalized_distances)
plateau = None
for idist, dist in enumerate(all_nbs_normalized_distances_sorted):
if np.isclose(dist, maxdist,
rtol=0.0, atol=self.detailed_voronoi.normalized_distance_tolerance):
if idist == 0:
plateau = np.inf
else:
plateau = all_nbs_normalized_distances_sorted[idist-1] - maxdist
break
if plateau is None:
raise ValueError('Plateau not found ...')
return plateau
def angle_plateau(self):
all_nbs_normalized_angles_sorted = sorted([nb['normalized_angle'] for nb in self.voronoi])
minang = np.min(self.normalized_angles)
print('minang', minang)
print('all_nbs_normalized_angles_sorted', all_nbs_normalized_angles_sorted)
for nb in self.voronoi:
print(nb)
plateau = None
for iang, ang in enumerate(all_nbs_normalized_angles_sorted):
if np.isclose(ang, minang,
rtol=0.0, atol=self.detailed_voronoi.normalized_angle_tolerance):
if iang == 0:
plateau = minang
else:
plateau = minang - all_nbs_normalized_angles_sorted[iang-1]
break
if plateau is None:
raise ValueError('Plateau not found ...')
return plateau
def voronoi_grid_surface_points(self, additional_condition=1, other_origins='DO_NOTHING'):
"""
Get the surface points in the Voronoi grid for this neighbor from the sources.
The general shape of the points should look like a staircase such as in the following figure :
^
0.0|
|
| B----C
| | |
| | |
a | k D-------E
n | | |
g | | |
l | | |
e | j F----n---------G
| | |
| | |
| A----g-------h----i---------H
|
|
1.0+------------------------------------------------->
1.0 distance 2.0 ->+Inf
:param additional_condition: Additional condition for the neighbors.
:param other_origins: What to do with sources that do not come from the Voronoi grid (e.g. "from hints")
"""
mysrc = []
for src in self.sources:
if src['origin'] == 'dist_ang_ac_voronoi':
if src['ac'] != additional_condition:
continue
mysrc.append(src)
else:
if other_origins == 'DO_NOTHING':
continue
else:
raise NotImplementedError('Nothing implemented for other sources ...')
if len(mysrc) == 0:
return None
dists = [src['dp_dict']['min'] for src in mysrc]
angs = [src['ap_dict']['max'] for src in mysrc]
next_dists = [src['dp_dict']['next'] for src in mysrc]
next_angs = [src['ap_dict']['next'] for src in mysrc]
points_dict = OrderedDict()
pdists = []
pangs = []
for isrc in range(len(mysrc)):
if not any(np.isclose(pdists, dists[isrc])):
pdists.append(dists[isrc])
if not any(np.isclose(pdists, next_dists[isrc])):
pdists.append(next_dists[isrc])
if not any(np.isclose(pangs, angs[isrc])):
pangs.append(angs[isrc])
if not any(np.isclose(pangs, next_angs[isrc])):
pangs.append(next_angs[isrc])
d1_indices = np.argwhere(np.isclose(pdists, dists[isrc])).flatten()
if len(d1_indices) != 1:
raise ValueError('Distance parameter not found ...')
d2_indices = np.argwhere(np.isclose(pdists, next_dists[isrc])).flatten()
if len(d2_indices) != 1:
raise ValueError('Distance parameter not found ...')
a1_indices = np.argwhere(np.isclose(pangs, angs[isrc])).flatten()
if len(a1_indices) != 1:
raise ValueError('Angle parameter not found ...')
a2_indices = np.argwhere(np.isclose(pangs, next_angs[isrc])).flatten()
if len(a2_indices) != 1:
raise ValueError('Angle parameter not found ...')
id1 = d1_indices[0]
id2 = d2_indices[0]
ia1 = a1_indices[0]
ia2 = a2_indices[0]
for id_ia in [(id1, ia1), (id1, ia2), (id2, ia1), (id2, ia2)]:
if id_ia not in points_dict:
points_dict[id_ia] = 0
points_dict[id_ia] += 1
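            # Corners shared by an even number of source rectangles are interior points; only the corners that
            # appear an odd number of times lie on the boundary of the union of rectangles (the staircase).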
new_pts = []
for pt, pt_nb in points_dict.items():
if pt_nb % 2 == 1:
new_pts.append(pt)
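            # Order the boundary corners into a closed path: starting from (0, 0), alternately "move" along the
            # angle axis and along the distance axis until the walk comes back to the starting corner.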
sorted_points = [(0, 0)]
move_ap_index = True
while True:
last_pt = sorted_points[-1]
if move_ap_index: # "Move" the angle parameter
idp = last_pt[0]
iap = None
for pt in new_pts:
if pt[0] == idp and pt != last_pt:
iap = pt[1]
break
else: # "Move" the distance parameter
idp = None
iap = last_pt[1]
for pt in new_pts:
if pt[1] == iap and pt != last_pt:
idp = pt[0]
break
if (idp, iap) == (0, 0):
break
if (idp, iap) in sorted_points:
raise ValueError('Error sorting points ...')
sorted_points.append((idp, iap))
move_ap_index = not move_ap_index
points = [(pdists[idp], pangs[iap]) for (idp, iap) in sorted_points]
return points
@property
def source(self):
if len(self.sources) != 1:
raise RuntimeError('Number of sources different from 1 !')
return self.sources[0]
def add_source(self, source):
if source not in self.sources:
self.sources.append(source)
def __len__(self):
return len(self.site_voronoi_indices)
def __hash__(self):
return len(self.site_voronoi_indices)
def __eq__(self, other):
return self.isite == other.isite and self.site_voronoi_indices == other.site_voronoi_indices
def __ne__(self, other):
return not self == other
def __str__(self):
out = 'Neighbors Set for site #{:d} :\n'.format(self.isite)
out += ' - Coordination number : {:d}\n'.format(len(self))
out += ' - Voronoi indices : {}\n'.format(', '.join(['{:d}'.format(site_voronoi_index)
for site_voronoi_index in self.site_voronoi_indices]))
return out
def as_dict(self):
return {'isite': self.isite,
'site_voronoi_indices': self.site_voronoi_indices,
'sources': self.sources}
@classmethod
def from_dict(cls, dd, structure, detailed_voronoi):
return cls(structure=structure,
isite=dd['isite'],
detailed_voronoi=detailed_voronoi,
site_voronoi_indices=dd['site_voronoi_indices'],
sources=dd['sources'])
def __init__(self, voronoi, valences, sites_map, equivalent_sites,
ce_list, structure, neighbors_sets=None, info=None):
"""
Constructor for the StructureEnvironments object.
:param voronoi: VoronoiContainer object for the structure
:param valences: Valences provided
:param sites_map: Mapping of equivalent sites to the unequivalent sites that have been computed.
:param equivalent_sites: List of list of equivalent sites of the structure
:param struct_sites_to_irreducible_site_list_map: Maps the index of a site to the index of the item in the
list of equivalent sites to which the site belongs.
:param ce_list: List of chemical environments
:param structure: Structure object
"""
self.voronoi = voronoi
self.valences = valences
self.sites_map = sites_map
self.equivalent_sites = equivalent_sites
#self.struct_sites_to_irreducible_site_list_map = struct_sites_to_irreducible_site_list_map
self.ce_list = ce_list
self.structure = structure
if neighbors_sets is None:
self.neighbors_sets = [None] * len(self.structure)
else:
self.neighbors_sets = neighbors_sets
self.info = info
def init_neighbors_sets(self, isite, additional_conditions=None, valences=None):
site_voronoi = self.voronoi.voronoi_list2[isite]
if site_voronoi is None:
return
if additional_conditions is None:
additional_conditions = self.AC.ALL
if (self.AC.ONLY_ACB in additional_conditions or
self.AC.ONLY_ACB_AND_NO_E2SEB) and valences is None:
raise ChemenvError('StructureEnvironments', 'init_neighbors_sets',
'Valences are not given while only_anion_cation_bonds are allowed. Cannot continue')
site_distance_parameters = self.voronoi.neighbors_normalized_distances[isite]
site_angle_parameters = self.voronoi.neighbors_normalized_angles[isite]
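        # Each (distance parameter, angle parameter, additional condition) triplet defines a candidate set of
        # neighbors; the corresponding conditions are precomputed for each Voronoi neighbor below so that they
        # are not re-evaluated inside the triple loop.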
# Precompute distance conditions
distance_conditions = []
for idp, dp_dict in enumerate(site_distance_parameters):
distance_conditions.append([])
for inb, voro_nb_dict in enumerate(site_voronoi):
cond = inb in dp_dict['nb_indices']
distance_conditions[idp].append(cond)
# Precompute angle conditions
angle_conditions = []
for iap, ap_dict in enumerate(site_angle_parameters):
angle_conditions.append([])
for inb, voro_nb_dict in enumerate(site_voronoi):
cond = inb in ap_dict['nb_indices']
angle_conditions[iap].append(cond)
# Precompute additional conditions
precomputed_additional_conditions = {ac: [] for ac in additional_conditions}
for inb, voro_nb_dict in enumerate(site_voronoi):
for ac in additional_conditions:
cond = self.AC.check_condition(condition=ac, structure=self.structure,
parameters={'valences': valences,
'neighbor_index': voro_nb_dict['index'],
'site_index': isite})
precomputed_additional_conditions[ac].append(cond)
# Add the neighbors sets based on the distance/angle/additional parameters
for idp, dp_dict in enumerate(site_distance_parameters):
for iap, ap_dict in enumerate(site_angle_parameters):
for iac, ac in enumerate(additional_conditions):
src = {'origin': 'dist_ang_ac_voronoi',
'idp': idp, 'iap': iap, 'dp_dict': dp_dict, 'ap_dict': ap_dict,
'iac': iac, 'ac': ac, 'ac_name': self.AC.CONDITION_DESCRIPTION[ac]}
site_voronoi_indices = [inb for inb, voro_nb_dict in enumerate(site_voronoi)
if (distance_conditions[idp][inb] and
angle_conditions[iap][inb] and
precomputed_additional_conditions[ac][inb])]
nb_set = self.NeighborsSet(structure=self.structure,
isite=isite,
detailed_voronoi=self.voronoi,
site_voronoi_indices=site_voronoi_indices,
sources=src)
self.add_neighbors_set(isite=isite, nb_set=nb_set)
def add_neighbors_set(self, isite, nb_set):
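        # Neighbors sets are stored per coordination number; if an identical neighbors set has already been added
        # (e.g. coming from another distance/angle/condition combination), only its source is updated.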
if self.neighbors_sets[isite] is None:
self.neighbors_sets[isite] = {}
self.ce_list[isite] = {}
cn = len(nb_set)
if cn not in self.neighbors_sets[isite]:
self.neighbors_sets[isite][cn] = []
self.ce_list[isite][cn] = []
try:
nb_set_index = self.neighbors_sets[isite][cn].index(nb_set)
self.neighbors_sets[isite][cn][nb_set_index].add_source(nb_set.source)
except ValueError:
self.neighbors_sets[isite][cn].append(nb_set)
self.ce_list[isite][cn].append(None)
def update_coordination_environments(self, isite, cn, nb_set, ce):
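        # The chemical environments are stored at the same index as their neighbors set in the per-cn list:
        # append the new environment if it is the next one, overwrite it if it already exists.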
if self.ce_list[isite] is None:
self.ce_list[isite] = {}
if cn not in self.ce_list[isite]:
self.ce_list[isite][cn] = []
try:
nb_set_index = self.neighbors_sets[isite][cn].index(nb_set)
except ValueError:
raise ValueError('Neighbors set not found in the structure environments')
if nb_set_index == len(self.ce_list[isite][cn]):
self.ce_list[isite][cn].append(ce)
elif nb_set_index < len(self.ce_list[isite][cn]):
self.ce_list[isite][cn][nb_set_index] = ce
else:
raise ValueError('Neighbors set not yet in ce_list !')
def update_site_info(self, isite, info_dict):
if 'sites_info' not in self.info:
self.info['sites_info'] = [{} for _ in range(len(self.structure))]
self.info['sites_info'][isite].update(info_dict)
def get_coordination_environments(self, isite, cn, nb_set):
if self.ce_list[isite] is None:
return None
if cn not in self.ce_list[isite]:
return None
try:
nb_set_index = self.neighbors_sets[isite][cn].index(nb_set)
except ValueError:
return None
return self.ce_list[isite][cn][nb_set_index]
def get_csm(self, isite, mp_symbol):
csms = self.get_csms(isite, mp_symbol)
if len(csms) != 1:
raise ChemenvError('StructureEnvironments',
'get_csm',
'Number of csms for site #{} with '
'mp_symbol "{}" = {}'.format(str(isite),
mp_symbol,
str(len(csms))))
return csms[0]
def get_csms(self, isite, mp_symbol):
"""
Returns the continuous symmetry measure(s) of site with index isite with respect to the
perfect coordination environment with mp_symbol. For some environments, a given mp_symbol might not
        be available (if there are no voronoi parameters leading to a number of neighbours corresponding to
        the coordination number of environment mp_symbol). For some environments, a given mp_symbol might
        lead to more than one csm (when two or more different voronoi parameters lead to different neighbours
        but with the same number of neighbours).
:param isite: Index of the site
:param mp_symbol: MP symbol of the perfect environment for which the csm has to be given
:return: List of csms for site isite with respect to geometry mp_symbol
"""
cn = symbol_cn_mapping[mp_symbol]
if cn not in self.ce_list[isite]:
return []
else:
return [envs[mp_symbol] for envs in self.ce_list[isite][cn]]
def plot_csm_and_maps(self, isite, max_csm=8.0):
"""
Plotting of the coordination numbers of a given site for all the distfactor/angfactor parameters. If the
chemical environments are given, a color map is added to the plot, with the lowest continuous symmetry measure
as the value for the color of that distfactor/angfactor set.
:param isite: Index of the site for which the plot has to be done
:param plot_type: How to plot the coordinations
:param title: Title for the figure
:param max_dist: Maximum distance to be plotted when the plotting of the distance is set to 'initial_normalized'
or 'initial_real' (Warning: this is not the same meaning in both cases! In the first case,
the closest atom lies at a "normalized" distance of 1.0 so that 2.0 means refers to this
normalized distance while in the second case, the real distance is used)
:param figsize: Size of the figure to be plotted
:return: Nothing returned, just plot the figure
"""
try:
import matplotlib.pyplot as plt
except ImportError:
print('Plotting Chemical Environments requires matplotlib ... exiting "plot" function')
return
fig = self.get_csm_and_maps(isite=isite, max_csm=max_csm)
if fig is None:
return
plt.show()
def get_csm_and_maps(self, isite, max_csm=8.0, figsize=None, symmetry_measure_type=None):
try:
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
except ImportError:
print('Plotting Chemical Environments requires matplotlib ... exiting "plot" function')
return
if symmetry_measure_type is None:
symmetry_measure_type = 'csm_wcs_ctwcc'
# Initializes the figure
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
gs = GridSpec(2, 1, hspace=0.0, wspace=0.0)
subplot = fig.add_subplot(gs[:])
subplot_distang = subplot.twinx()
ix = 0
cn_maps = []
all_wds = []
all_was = []
max_wd = 0.0
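        # Collect, for each (coordination number, neighbors set) pair having at least one geometry below max_csm,
        # the csms to plot on the left axis and the normalized distances/angles to overlay on the right axis.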
for cn, nb_sets in self.neighbors_sets[isite].items():
for inb_set, nb_set in enumerate(nb_sets):
ce = self.ce_list[isite][cn][inb_set]
if ce is None:
continue
mingeoms = ce.minimum_geometries(max_csm=max_csm)
if len(mingeoms) == 0:
continue
wds = nb_set.normalized_distances
max_wd = max(max_wd, max(wds))
all_wds.append(wds)
all_was.append(nb_set.normalized_angles)
for mp_symbol, cg_dict in mingeoms:
csm = cg_dict['other_symmetry_measures'][symmetry_measure_type]
subplot.plot(ix, csm, 'ob')
subplot.annotate(mp_symbol, xy = (ix, csm))
cn_maps.append((cn, inb_set))
ix += 1
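        # Choose a "nice" upper limit and tick spacing for the normalized-distance axis, depending on the largest
        # normalized distance encountered.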
if max_wd < 1.225:
ymax_wd = 1.25
yticks_wd = np.linspace(1.0, ymax_wd, 6)
elif max_wd < 1.36:
ymax_wd = 1.4
yticks_wd = np.linspace(1.0, ymax_wd, 5)
elif max_wd < 1.45:
ymax_wd = 1.5
yticks_wd = np.linspace(1.0, ymax_wd, 6)
elif max_wd < 1.55:
ymax_wd = 1.6
yticks_wd = np.linspace(1.0, ymax_wd, 7)
elif max_wd < 1.75:
ymax_wd = 1.8
yticks_wd = np.linspace(1.0, ymax_wd, 5)
elif max_wd < 1.95:
ymax_wd = 2.0
yticks_wd = np.linspace(1.0, ymax_wd, 6)
elif max_wd < 2.35:
ymax_wd = 2.5
yticks_wd = np.linspace(1.0, ymax_wd, 7)
else:
ymax_wd = np.ceil(1.1*max_wd)
yticks_wd = np.linspace(1.0, ymax_wd, 6)
yticks_wa = np.linspace(0.0, 1.0, 6)
frac_bottom = 0.05
frac_top = 0.05
frac_middle = 0.1
yamin = frac_bottom
yamax = 0.5 - frac_middle / 2
ydmin = 0.5 + frac_middle / 2
ydmax = 1.0 - frac_top
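        # The normalized angles are drawn in the lower band [yamin, yamax] of the right axis and the normalized
        # distances in the upper band [ydmin, ydmax]; yang and ydist map the raw values into these bands.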
def yang(wa):
return (yamax-yamin) * np.array(wa) + yamin
def ydist(wd):
return (np.array(wd) - 1.0) / (ymax_wd - 1.0) * (ydmax - ydmin) + ydmin
for ix, was in enumerate(all_was):
subplot_distang.plot(0.2+ix*np.ones_like(was), yang(was), '<g')
if np.mod(ix, 2) == 0:
alpha = 0.3
else:
alpha = 0.1
subplot_distang.fill_between([-0.5+ix, 0.5+ix],
[1.0, 1.0], 0.0,
facecolor='k', alpha=alpha, zorder=-1000)
for ix, wds in enumerate(all_wds):
subplot_distang.plot(0.2+ix*np.ones_like(wds), ydist(wds), 'sm')
subplot_distang.plot([-0.5, len(cn_maps)], [0.5, 0.5], 'k--', alpha=0.5)
yticks = yang(yticks_wa).tolist()
yticks.extend(ydist(yticks_wd).tolist())
yticklabels = yticks_wa.tolist()
yticklabels.extend(yticks_wd.tolist())
subplot_distang.set_yticks(yticks)
subplot_distang.set_yticklabels(yticklabels)
fake_subplot_ang = fig.add_subplot(gs[1], frame_on=False)
fake_subplot_dist = fig.add_subplot(gs[0], frame_on=False)
fake_subplot_ang.set_yticks([])
fake_subplot_dist.set_yticks([])
fake_subplot_ang.set_xticks([])
fake_subplot_dist.set_xticks([])
fake_subplot_ang.set_ylabel('Angle parameter', labelpad=45, rotation=-90)
fake_subplot_dist.set_ylabel('Distance parameter', labelpad=45, rotation=-90)
fake_subplot_ang.yaxis.set_label_position("right")
fake_subplot_dist.yaxis.set_label_position("right")
subplot_distang.set_ylim([0.0, 1.0])
subplot.set_xticks(range(len(cn_maps)))
subplot.set_ylabel('Continuous symmetry measure')
subplot.set_xlim([-0.5, len(cn_maps)-0.5])
subplot_distang.set_xlim([-0.5, len(cn_maps)-0.5])
subplot.set_xticklabels([str(cn_map) for cn_map in cn_maps])
return fig, subplot
def get_environments_figure(self, isite, plot_type=None, title='Coordination numbers', max_dist=2.0,
additional_condition=AC.ONLY_ACB, colormap=None, figsize=None,
strategy=None):
"""
Plotting of the coordination environments of a given site for all the distfactor/angfactor regions. The
chemical environments with the lowest continuous symmetry measure is shown for each distfactor/angfactor
region as the value for the color of that distfactor/angfactor region (using a colormap).
:param isite: Index of the site for which the plot has to be done
:param plot_type: How to plot the coordinations
:param title: Title for the figure
        :param max_dist: Maximum distance to be plotted when the plotting of the distance is set to 'initial_normalized'
                         or 'initial_real' (Warning: this does not have the same meaning in both cases! In the first
                         case, the closest atom lies at a "normalized" distance of 1.0, so that 2.0 refers to twice
                         this normalized distance, while in the second case the real distance is used)
        :param additional_condition: Additional condition used to select the neighbors sets to be plotted
        :param colormap: Colormap to be used for the continuous symmetry measures (jet colormap if None)
        :param figsize: Size of the figure to be plotted
        :param strategy: If given, the visualization of this strategy is added to the plot
:return: The figure object to be plotted or saved to file
"""
try:
import matplotlib.pyplot as mpl
from matplotlib import cm
from matplotlib.colors import Normalize, LinearSegmentedColormap, ListedColormap
from matplotlib.patches import Rectangle, Polygon
except ImportError:
print('Plotting Chemical Environments requires matplotlib ... exiting "plot" function')
return
#Initializes the figure
if figsize is None:
fig = mpl.figure()
else:
fig = mpl.figure(figsize=figsize)
subplot = fig.add_subplot(111)
#Initializes the distance and angle parameters
if plot_type is None:
plot_type = {'distance_parameter': ('initial_normalized', None),
'angle_parameter': ('initial_normalized_inverted', None)}
if colormap is None:
mycm = cm.jet
else:
mycm = colormap
mymin = 0.0
mymax = 10.0
norm = Normalize(vmin=mymin, vmax=mymax)
scalarmap = cm.ScalarMappable(norm=norm, cmap=mycm)
dist_limits = [1.0, max_dist]
ang_limits = [0.0, 1.0]
if plot_type['distance_parameter'][0] == 'one_minus_inverse_alpha_power_n':
if plot_type['distance_parameter'][1] is None:
exponent = 3
else:
exponent = plot_type['distance_parameter'][1]['exponent']
xlabel = 'Distance parameter : $1.0-\\frac{{1.0}}{{\\alpha^{{{:d}}}}}$'.format(exponent)
def dp_func(dp):
return 1.0-1.0/np.power(dp, exponent)
elif plot_type['distance_parameter'][0] == 'initial_normalized':
xlabel = 'Distance parameter : $\\alpha$'
def dp_func(dp):
return dp
else:
raise ValueError('Wrong value for distance parameter plot type "{}"'.
format(plot_type['distance_parameter'][0]))
if plot_type['angle_parameter'][0] == 'one_minus_gamma':
ylabel = 'Angle parameter : $1.0-\\gamma$'
def ap_func(ap):
return 1.0-ap
elif plot_type['angle_parameter'][0] in ['initial_normalized_inverted', 'initial_normalized']:
ylabel = 'Angle parameter : $\\gamma$'
def ap_func(ap):
return ap
else:
raise ValueError('Wrong value for angle parameter plot type "{}"'.
format(plot_type['angle_parameter'][0]))
dist_limits = [dp_func(dp) for dp in dist_limits]
ang_limits = [ap_func(ap) for ap in ang_limits]
for cn, cn_nb_sets in self.neighbors_sets[isite].items():
for inb_set, nb_set in enumerate(cn_nb_sets):
nb_set_surface_pts = nb_set.voronoi_grid_surface_points()
if nb_set_surface_pts is None:
continue
ce = self.ce_list[isite][cn][inb_set]
if ce is None:
mycolor = 'w'
myinvcolor = 'k'
mytext = '{:d}'.format(cn)
else:
mingeom = ce.minimum_geometry()
if mingeom is not None:
mp_symbol = mingeom[0]
csm = mingeom[1]['symmetry_measure']
mycolor = scalarmap.to_rgba(csm)
myinvcolor = [1.0 - mycolor[0], 1.0 - mycolor[1], 1.0 - mycolor[2], 1.0]
mytext = '{}'.format(mp_symbol)
else:
mycolor = 'w'
myinvcolor = 'k'
mytext = '{:d}'.format(cn)
nb_set_surface_pts = [(dp_func(pt[0]), ap_func(pt[1])) for pt in nb_set_surface_pts]
polygon = Polygon(nb_set_surface_pts, closed=True, edgecolor='k', facecolor=mycolor, linewidth=1.2)
subplot.add_patch(polygon)
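                # Place the text label (mp_symbol or coordination number) inside the patch; two candidate anchor
                # points are tried depending on the shape of the staircase region, and no label is written if the
                # region is too small.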
myipt = len(nb_set_surface_pts) / 2
ipt = int(myipt)
if myipt != ipt:
raise RuntimeError('Number of surface points not even')
patch_center = ((nb_set_surface_pts[0][0] + min(nb_set_surface_pts[ipt][0], dist_limits[1])) / 2,
(nb_set_surface_pts[0][1] + nb_set_surface_pts[ipt][1]) / 2)
if (np.abs(nb_set_surface_pts[-1][1] - nb_set_surface_pts[-2][1]) > 0.06 and
np.abs(min(nb_set_surface_pts[-1][0], dist_limits[1]) - nb_set_surface_pts[0][0]) > 0.125):
xytext = ((min(nb_set_surface_pts[-1][0], dist_limits[1]) + nb_set_surface_pts[0][0]) / 2,
(nb_set_surface_pts[-1][1] + nb_set_surface_pts[-2][1]) / 2)
subplot.annotate(mytext, xy=xytext,
ha='center', va='center', color=myinvcolor, fontsize='x-small')
elif (np.abs(nb_set_surface_pts[ipt][1] - nb_set_surface_pts[0][1]) > 0.1 and
np.abs(min(nb_set_surface_pts[ipt][0], dist_limits[1]) - nb_set_surface_pts[0][0]) > 0.125):
xytext = patch_center
subplot.annotate(mytext, xy=xytext,
ha='center', va='center', color=myinvcolor, fontsize='x-small')
subplot.set_title(title)
subplot.set_xlabel(xlabel)
subplot.set_ylabel(ylabel)
dist_limits.sort()
ang_limits.sort()
subplot.set_xlim(dist_limits)
subplot.set_ylim(ang_limits)
if strategy is not None:
try:
strategy.add_strategy_visualization_to_subplot(subplot=subplot)
except:
pass
if plot_type['angle_parameter'][0] == 'initial_normalized_inverted':
subplot.axes.invert_yaxis()
scalarmap.set_array([mymin, mymax])
cb = fig.colorbar(scalarmap, ax=subplot, extend='max')
cb.set_label('Continuous symmetry measure')
return fig, subplot
def plot_environments(self, isite, plot_type=None, title='Coordination numbers', max_dist=2.0,
additional_condition=AC.ONLY_ACB, figsize=None, strategy=None):
"""
Plotting of the coordination numbers of a given site for all the distfactor/angfactor parameters. If the
chemical environments are given, a color map is added to the plot, with the lowest continuous symmetry measure
as the value for the color of that distfactor/angfactor set.
:param isite: Index of the site for which the plot has to be done
:param plot_type: How to plot the coordinations
:param title: Title for the figure
        :param max_dist: Maximum distance to be plotted when the plotting of the distance is set to 'initial_normalized'
                         or 'initial_real' (Warning: this does not have the same meaning in both cases! In the first
                         case, the closest atom lies at a "normalized" distance of 1.0, so that 2.0 refers to twice
                         this normalized distance, while in the second case the real distance is used)
:param figsize: Size of the figure to be plotted
:return: Nothing returned, just plot the figure
"""
fig, subplot = self.get_environments_figure(isite=isite, plot_type=plot_type, title=title, max_dist=max_dist,
additional_condition=additional_condition, figsize=figsize,
strategy=strategy)
if fig is None:
return
fig.show()
def save_environments_figure(self, isite, imagename='image.png', plot_type=None, title='Coordination numbers',
max_dist=2.0, additional_condition=AC.ONLY_ACB, figsize=None):
fig, subplot = self.get_environments_figure(isite=isite, plot_type=plot_type, title=title, max_dist=max_dist,
additional_condition=additional_condition, figsize=figsize)
if fig is None:
return
fig.savefig(imagename)
def differences_wrt(self, other):
differences = []
if self.structure != other.structure:
differences.append({'difference': 'structure',
'comparison': '__eq__',
'self': self.structure,
'other': other.structure})
differences.append({'difference': 'PREVIOUS DIFFERENCE IS DISMISSIVE',
'comparison': 'differences_wrt'})
return differences
if self.valences != other.valences:
differences.append({'difference': 'valences',
'comparison': '__eq__',
'self': self.valences,
'other': other.valences})
if self.info != other.info:
differences.append({'difference': 'info',
'comparison': '__eq__',
'self': self.info,
'other': other.info})
if self.voronoi != other.voronoi:
if self.voronoi.is_close_to(other.voronoi):
differences.append({'difference': 'voronoi',
'comparison': '__eq__',
'self': self.voronoi,
'other': other.voronoi})
differences.append({'difference': 'PREVIOUS DIFFERENCE IS DISMISSIVE',
'comparison': 'differences_wrt'})
return differences
else:
differences.append({'difference': 'voronoi',
'comparison': 'is_close_to',
'self': self.voronoi,
'other': other.voronoi})
# TODO: make it possible to have "close" voronoi's
differences.append({'difference': 'PREVIOUS DIFFERENCE IS DISMISSIVE',
'comparison': 'differences_wrt'})
return differences
for isite, self_site_nb_sets in enumerate(self.neighbors_sets):
other_site_nb_sets = other.neighbors_sets[isite]
if self_site_nb_sets is None:
if other_site_nb_sets is None:
continue
else:
differences.append({'difference': 'neighbors_sets[isite={:d}]'.format(isite),
'comparison': 'has_neighbors',
'self': 'None',
'other': set(other_site_nb_sets.keys())})
continue
elif other_site_nb_sets is None:
differences.append({'difference': 'neighbors_sets[isite={:d}]'.format(isite),
'comparison': 'has_neighbors',
'self': set(self_site_nb_sets.keys()),
'other': 'None'})
continue
self_site_cns = set(self_site_nb_sets.keys())
other_site_cns = set(other_site_nb_sets.keys())
if self_site_cns != other_site_cns:
differences.append({'difference': 'neighbors_sets[isite={:d}]'.format(isite),
'comparison': 'coordination_numbers',
'self': self_site_cns,
'other': other_site_cns})
common_cns = self_site_cns.intersection(other_site_cns)
for cn in common_cns:
other_site_cn_nb_sets = other_site_nb_sets[cn]
self_site_cn_nb_sets = self_site_nb_sets[cn]
set_self_site_cn_nb_sets = set(self_site_cn_nb_sets)
set_other_site_cn_nb_sets = set(other_site_cn_nb_sets)
if set_self_site_cn_nb_sets != set_other_site_cn_nb_sets:
differences.append({'difference': 'neighbors_sets[isite={:d}][cn={:d}]'.format(isite, cn),
'comparison': 'neighbors_sets',
'self': self_site_cn_nb_sets,
'other': other_site_cn_nb_sets})
common_nb_sets = set_self_site_cn_nb_sets.intersection(set_other_site_cn_nb_sets)
for nb_set in common_nb_sets:
inb_set_self = self_site_cn_nb_sets.index(nb_set)
inb_set_other = other_site_cn_nb_sets.index(nb_set)
self_ce = self.ce_list[isite][cn][inb_set_self]
other_ce = other.ce_list[isite][cn][inb_set_other]
if self_ce != other_ce:
if self_ce.is_close_to(other_ce):
differences.append({'difference': 'ce_list[isite={:d}][cn={:d}]'
'[inb_set={:d}]'.format(isite, cn, inb_set_self),
'comparison': '__eq__',
'self': self_ce,
'other': other_ce})
else:
differences.append({'difference': 'ce_list[isite={:d}][cn={:d}]'
'[inb_set={:d}]'.format(isite, cn, inb_set_self),
'comparison': 'is_close_to',
'self': self_ce,
'other': other_ce})
return differences
def __eq__(self, other):
if len(self.ce_list) != len(other.ce_list):
return False
if self.voronoi != other.voronoi:
return False
if len(self.valences) != len(other.valences):
return False
if self.sites_map != other.sites_map:
return False
if self.equivalent_sites != other.equivalent_sites:
return False
if self.structure != other.structure:
return False
if self.info != other.info:
return False
for isite, site_ces in enumerate(self.ce_list):
site_nb_sets_self = self.neighbors_sets[isite]
site_nb_sets_other = other.neighbors_sets[isite]
if site_nb_sets_self != site_nb_sets_other:
return False
if site_ces != other.ce_list[isite]:
return False
return True
def __ne__(self, other):
return not self == other
def as_dict(self):
"""
Bson-serializable dict representation of the StructureEnvironments object.
:return: Bson-serializable dict representation of the StructureEnvironments object.
"""
ce_list_dict = [{str(cn): [ce.as_dict() if ce is not None else None for ce in ce_dict[cn]]
for cn in ce_dict} if ce_dict is not None else None for ce_dict in self.ce_list]
nbs_sets_dict = [{str(cn): [nb_set.as_dict() for nb_set in nb_sets]
for cn, nb_sets in site_nbs_sets.items()}
if site_nbs_sets is not None else None
for site_nbs_sets in self.neighbors_sets]
info_dict = {key: val for key, val in self.info.items() if key not in ['sites_info']}
info_dict['sites_info'] = [{'nb_sets_info': {str(cn): {str(inb_set): nb_set_info
for inb_set, nb_set_info in cn_sets.items()}
for cn, cn_sets in site_info['nb_sets_info'].items()},
'time': site_info['time']} if 'nb_sets_info' in site_info else {}
for site_info in self.info['sites_info']]
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"voronoi": self.voronoi.as_dict(),
"valences": self.valences,
"sites_map": self.sites_map,
"equivalent_sites": [[ps.as_dict() for ps in psl] for psl in self.equivalent_sites],
"ce_list": ce_list_dict,
"structure": self.structure.as_dict(),
"neighbors_sets": nbs_sets_dict,
"info": info_dict}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the StructureEnvironments object from a dict representation of the StructureEnvironments created
using the as_dict method.
:param d: dict representation of the StructureEnvironments object
:return: StructureEnvironments object
"""
ce_list = [None if (ce_dict == 'None' or ce_dict is None) else {
int(cn): [None if (ced is None or ced == 'None') else
ChemicalEnvironments.from_dict(ced) for ced in ce_dict[cn]]
for cn in ce_dict} for ce_dict in d['ce_list']]
voronoi = DetailedVoronoiContainer.from_dict(d['voronoi'])
structure = Structure.from_dict(d['structure'])
neighbors_sets = [{int(cn): [cls.NeighborsSet.from_dict(dd=nb_set_dict,
structure=structure,
detailed_voronoi=voronoi)
for nb_set_dict in nb_sets]
for cn, nb_sets in site_nbs_sets_dict.items()}
if site_nbs_sets_dict is not None else None
for site_nbs_sets_dict in d['neighbors_sets']]
info = {key: val for key, val in d['info'].items() if key not in ['sites_info']}
if 'sites_info' in d['info']:
info['sites_info'] = [{'nb_sets_info': {int(cn): {int(inb_set): nb_set_info
for inb_set, nb_set_info in cn_sets.items()}
for cn, cn_sets in site_info['nb_sets_info'].items()},
'time': site_info['time']} if 'nb_sets_info' in site_info else {}
for site_info in d['info']['sites_info']]
return cls(voronoi=voronoi, valences=d['valences'],
sites_map=d['sites_map'],
equivalent_sites=[[PeriodicSite.from_dict(psd) for psd in psl] for psl in d['equivalent_sites']],
ce_list=ce_list, structure=structure,
neighbors_sets=neighbors_sets,
info=info)
class LightStructureEnvironments(MSONable):
"""
    Class used to store the chemical environments of a given structure obtained from a given ChemenvStrategy.
    Currently, only strategies leading to the determination of a unique environment for each site are allowed.
    This class does not store all the information contained in the StructureEnvironments object, only the
    coordination environments found.
"""
DELTA_MAX_OXIDATION_STATE = 0.1
DEFAULT_STATISTICS_FIELDS = ['anion_list', 'anion_atom_list', 'cation_list', 'cation_atom_list',
'neutral_list', 'neutral_atom_list',
'atom_coordination_environments_present',
'ion_coordination_environments_present',
'fraction_atom_coordination_environments_present',
'fraction_ion_coordination_environments_present',
'coordination_environments_atom_present',
'coordination_environments_ion_present']
class NeighborsSet():
"""
Class used to store a given set of neighbors of a given site (based on a list of sites, the voronoi
container is not part of the LightStructureEnvironments object).
"""
def __init__(self, structure, isite, all_nbs_sites, all_nbs_sites_indices):
self.structure = structure
self.isite = isite
self.all_nbs_sites = all_nbs_sites
myset = set(all_nbs_sites_indices)
if len(myset) != len(all_nbs_sites_indices):
raise ValueError('Set of neighbors contains duplicates !')
self.all_nbs_sites_indices = sorted(myset)
self.all_nbs_sites_indices_unsorted = all_nbs_sites_indices
self.all_nbs_sites_indices_and_image = []
@property
def neighb_coords(self):
return [self.all_nbs_sites[inb]['site'].coords for inb in self.all_nbs_sites_indices_unsorted]
@property
def neighb_sites(self):
return [self.all_nbs_sites[inb]['site'] for inb in self.all_nbs_sites_indices_unsorted]
@property
def neighb_sites_and_indices(self):
return [{'site': self.all_nbs_sites[inb]['site'],
'index': self.all_nbs_sites[inb]['index']} for inb in self.all_nbs_sites_indices_unsorted]
@property
def neighb_indices_and_images(self):
return [{'index': self.all_nbs_sites[inb]['index'],
'image_cell': self.all_nbs_sites[inb]['image_cell']}
for inb in self.all_nbs_sites_indices_unsorted]
def __len__(self):
return len(self.all_nbs_sites_indices)
def __hash__(self):
return len(self.all_nbs_sites_indices)
def __eq__(self, other):
return self.isite == other.isite and self.all_nbs_sites_indices == other.all_nbs_sites_indices
def __ne__(self, other):
return not self == other
def __str__(self):
out = 'Neighbors Set for site #{:d} :\n'.format(self.isite)
out += ' - Coordination number : {:d}\n'.format(len(self))
out += ' - Neighbors sites indices : {}' \
'\n'.format(', '.join(['{:d}'.format(nb_list_index)
for nb_list_index in self.all_nbs_sites_indices]))
return out
def as_dict(self):
return {'isite': self.isite,
'all_nbs_sites_indices': self.all_nbs_sites_indices_unsorted}
# 'all_nbs_sites_indices_unsorted': self.all_nbs_sites_indices_unsorted}
@classmethod
def from_dict(cls, dd, structure, all_nbs_sites):
return cls(structure=structure,
isite=dd['isite'],
all_nbs_sites=all_nbs_sites,
all_nbs_sites_indices=dd['all_nbs_sites_indices'])
def __init__(self, strategy,
coordination_environments=None, all_nbs_sites=None, neighbors_sets=None,
structure=None, valences=None, valences_origin=None):
"""
Constructor for the LightStructureEnvironments object.
"""
self.strategy = strategy
self.statistics_dict = None
self.coordination_environments = coordination_environments
self._all_nbs_sites = all_nbs_sites
self.neighbors_sets = neighbors_sets
self.structure = structure
self.valences = valences
self.valences_origin = valences_origin
@classmethod
def from_structure_environments(cls, strategy, structure_environments, valences=None, valences_origin=None):
structure = structure_environments.structure
strategy.set_structure_environments(structure_environments=structure_environments)
coordination_environments = [None] * len(structure)
neighbors_sets = [None] * len(structure)
_all_nbs_sites = []
my_all_nbs_sites = []
if valences is None:
valences = structure_environments.valences
if valences_origin is None:
valences_origin = 'from_structure_environments'
else:
if valences_origin is None:
valences_origin = 'user-specified'
for isite, site in enumerate(structure):
site_ces_and_nbs_list = strategy.get_site_ce_fractions_and_neighbors(site, strategy_info=True)
if site_ces_and_nbs_list is None:
continue
coordination_environments[isite] = []
neighbors_sets[isite] = []
site_ces = []
site_nbs_sets = []
for ce_and_neighbors in site_ces_and_nbs_list:
_all_nbs_sites_indices = []
# Coordination environment
ce_dict = {'ce_symbol': ce_and_neighbors['ce_symbol'],
'ce_fraction': ce_and_neighbors['ce_fraction']}
if ce_and_neighbors['ce_dict'] is not None:
csm = ce_and_neighbors['ce_dict']['other_symmetry_measures'][strategy.symmetry_measure_type]
else:
csm = None
ce_dict['csm'] = csm
ce_dict['permutation'] = ce_and_neighbors['ce_dict']['permutation']
site_ces.append(ce_dict)
# Neighbors
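                # Each neighbor site is stored only once in _all_nbs_sites; for neighbors in periodic images, the
                # image cell is recovered from the difference between the neighbor's fractional coordinates and
                # those of the corresponding site in the unit cell.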
neighbors = ce_and_neighbors['neighbors']
for nb_site_and_index in neighbors:
nb_site = nb_site_and_index['site']
try:
nb_allnbs_sites_index = my_all_nbs_sites.index(nb_site)
except ValueError:
nb_index_unitcell = nb_site_and_index['index']
diff = nb_site.frac_coords - structure[nb_index_unitcell].frac_coords
rounddiff = np.round(diff)
if not np.allclose(diff, rounddiff):
                            raise ValueError('Weird, the difference between a neighbor site and its image in the '
                                             'unit cell is not an integer number of lattice vectors ...')
nb_image_cell = np.array(rounddiff, np.int)
nb_allnbs_sites_index = len(_all_nbs_sites)
_all_nbs_sites.append({'site': nb_site,
'index': nb_index_unitcell,
'image_cell': nb_image_cell})
my_all_nbs_sites.append(nb_site)
_all_nbs_sites_indices.append(nb_allnbs_sites_index)
nb_set = cls.NeighborsSet(structure=structure, isite=isite,
all_nbs_sites=_all_nbs_sites,
all_nbs_sites_indices=_all_nbs_sites_indices)
site_nbs_sets.append(nb_set)
coordination_environments[isite] = site_ces
neighbors_sets[isite] = site_nbs_sets
return cls(strategy=strategy,
coordination_environments=coordination_environments,
all_nbs_sites=_all_nbs_sites,
neighbors_sets=neighbors_sets,
structure=structure, valences=valences,
valences_origin=valences_origin)
def setup_statistic_lists(self):
self.statistics_dict = {'valences_origin': self.valences_origin,
'anion_list': {}, # OK
'anion_number': None, # OK
'anion_atom_list': {}, # OK
'anion_atom_number': None, # OK
'cation_list': {}, # OK
'cation_number': None, # OK
'cation_atom_list': {}, # OK
'cation_atom_number': None, # OK
'neutral_list': {}, # OK
'neutral_number': None, # OK
'neutral_atom_list': {}, # OK
'neutral_atom_number': None, # OK
'atom_coordination_environments_present': {}, # OK
'ion_coordination_environments_present': {}, # OK
'coordination_environments_ion_present': {}, # OK
'coordination_environments_atom_present': {}, # OK
'fraction_ion_coordination_environments_present': {}, # OK
'fraction_atom_coordination_environments_present': {}, # OK
'fraction_coordination_environments_ion_present': {}, # OK
'fraction_coordination_environments_atom_present': {}, # OK
'count_ion_present': {}, # OK
'count_atom_present': {}, # OK
'count_coordination_environments_present': {}}
atom_stat = self.statistics_dict['atom_coordination_environments_present']
ce_atom_stat = self.statistics_dict['coordination_environments_atom_present']
fraction_atom_stat = self.statistics_dict['fraction_atom_coordination_environments_present']
fraction_ce_atom_stat = self.statistics_dict['fraction_coordination_environments_atom_present']
count_atoms = self.statistics_dict['count_atom_present']
count_ce = self.statistics_dict['count_coordination_environments_present']
for isite, site in enumerate(self.structure):
# Building anion and cation list
site_species = []
if self.valences != 'undefined':
for sp, occ in site.species.items():
valence = self.valences[isite]
strspecie = str(Specie(sp.symbol, valence))
if valence < 0:
specielist = self.statistics_dict['anion_list']
atomlist = self.statistics_dict['anion_atom_list']
elif valence > 0:
specielist = self.statistics_dict['cation_list']
atomlist = self.statistics_dict['cation_atom_list']
else:
specielist = self.statistics_dict['neutral_list']
atomlist = self.statistics_dict['neutral_atom_list']
if strspecie not in specielist:
specielist[strspecie] = occ
else:
specielist[strspecie] += occ
if sp.symbol not in atomlist:
atomlist[sp.symbol] = occ
else:
atomlist[sp.symbol] += occ
site_species.append((sp.symbol, valence, occ))
# Building environments lists
if self.coordination_environments[isite] is not None:
site_envs = [(ce_piece_dict['ce_symbol'], ce_piece_dict['ce_fraction'])
for ce_piece_dict in self.coordination_environments[isite]]
for ce_symbol, fraction in site_envs:
if fraction is None:
continue
if ce_symbol not in count_ce:
count_ce[ce_symbol] = 0.0
count_ce[ce_symbol] += fraction
for sp, occ in site.species.items():
elmt = sp.symbol
if elmt not in atom_stat:
atom_stat[elmt] = {}
count_atoms[elmt] = 0.0
count_atoms[elmt] += occ
for ce_symbol, fraction in site_envs:
if fraction is None:
continue
if ce_symbol not in atom_stat[elmt]:
atom_stat[elmt][ce_symbol] = 0.0
atom_stat[elmt][ce_symbol] += occ * fraction
if ce_symbol not in ce_atom_stat:
ce_atom_stat[ce_symbol] = {}
if elmt not in ce_atom_stat[ce_symbol]:
ce_atom_stat[ce_symbol][elmt] = 0.0
ce_atom_stat[ce_symbol][elmt] += occ * fraction
if self.valences != 'undefined':
ion_stat = self.statistics_dict['ion_coordination_environments_present']
ce_ion_stat = self.statistics_dict['coordination_environments_ion_present']
count_ions = self.statistics_dict['count_ion_present']
for elmt, oxi_state, occ in site_species:
if elmt not in ion_stat:
ion_stat[elmt] = {}
count_ions[elmt] = {}
if oxi_state not in ion_stat[elmt]:
ion_stat[elmt][oxi_state] = {}
count_ions[elmt][oxi_state] = 0.0
count_ions[elmt][oxi_state] += occ
for ce_symbol, fraction in site_envs:
if fraction is None:
continue
if ce_symbol not in ion_stat[elmt][oxi_state]:
ion_stat[elmt][oxi_state][ce_symbol] = 0.0
ion_stat[elmt][oxi_state][ce_symbol] += occ * fraction
if ce_symbol not in ce_ion_stat:
ce_ion_stat[ce_symbol] = {}
if elmt not in ce_ion_stat[ce_symbol]:
ce_ion_stat[ce_symbol][elmt] = {}
if oxi_state not in ce_ion_stat[ce_symbol][elmt]:
ce_ion_stat[ce_symbol][elmt][oxi_state] = 0.0
ce_ion_stat[ce_symbol][elmt][oxi_state] += occ * fraction
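        # Reduce the raw counts accumulated above into the numbers of species/atoms present and into fractions of
        # coordination environments per atom/ion (and of atoms/ions per coordination environment).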
self.statistics_dict['anion_number'] = len(self.statistics_dict['anion_list'])
self.statistics_dict['anion_atom_number'] = len(self.statistics_dict['anion_atom_list'])
self.statistics_dict['cation_number'] = len(self.statistics_dict['cation_list'])
self.statistics_dict['cation_atom_number'] = len(self.statistics_dict['cation_atom_list'])
self.statistics_dict['neutral_number'] = len(self.statistics_dict['neutral_list'])
self.statistics_dict['neutral_atom_number'] = len(self.statistics_dict['neutral_atom_list'])
for elmt, envs in atom_stat.items():
sumelement = count_atoms[elmt]
fraction_atom_stat[elmt] = {env: fraction / sumelement for env, fraction in envs.items()}
for ce_symbol, atoms in ce_atom_stat.items():
sumsymbol = count_ce[ce_symbol]
fraction_ce_atom_stat[ce_symbol] = {atom: fraction / sumsymbol for atom, fraction in atoms.items()}
ion_stat = self.statistics_dict['ion_coordination_environments_present']
fraction_ion_stat = self.statistics_dict['fraction_ion_coordination_environments_present']
ce_ion_stat = self.statistics_dict['coordination_environments_ion_present']
fraction_ce_ion_stat = self.statistics_dict['fraction_coordination_environments_ion_present']
count_ions = self.statistics_dict['count_ion_present']
for elmt, oxi_states_envs in ion_stat.items():
fraction_ion_stat[elmt] = {}
for oxi_state, envs in oxi_states_envs.items():
sumspecie = count_ions[elmt][oxi_state]
fraction_ion_stat[elmt][oxi_state] = {env: fraction / sumspecie
for env, fraction in envs.items()}
for ce_symbol, ions in ce_ion_stat.items():
fraction_ce_ion_stat[ce_symbol] = {}
sum_ce = np.sum([np.sum(list(oxistates.values())) for elmt, oxistates in ions.items()])
for elmt, oxistates in ions.items():
fraction_ce_ion_stat[ce_symbol][elmt] = {oxistate: fraction / sum_ce
for oxistate, fraction in oxistates.items()}
def get_site_info_for_specie_ce(self, specie, ce_symbol, min_fraction=0.0):
element = specie.symbol
oxi_state = specie.oxi_state
isites = []
csms = []
fractions = []
for isite, site in enumerate(self.structure):
if element in [sp.symbol for sp in site.species]:
if self.valences == 'undefined' or oxi_state == self.valences[isite]:
                    if self.coordination_environments[isite] is None:
                        continue
                    for ce_dict in self.coordination_environments[isite]:
if ce_symbol == ce_dict['ce_symbol']:
isites.append(isite)
csms.append(ce_dict['csm'])
fractions.append(ce_dict['ce_fraction'])
return {'isites': isites, 'fractions': fractions, 'csms': csms}
def get_site_info_for_specie_allces(self, specie, min_fraction=0.0):
allces = {}
element = specie.symbol
oxi_state = specie.oxi_state
for isite, site in enumerate(self.structure):
if element in [sp.symbol for sp in site.species]:
if self.valences == 'undefined' or oxi_state == self.valences[isite]:
if self.coordination_environments[isite] is None:
continue
for ce_dict in self.coordination_environments[isite]:
if ce_dict['ce_fraction'] < min_fraction:
continue
if ce_dict['ce_symbol'] not in allces:
allces[ce_dict['ce_symbol']] = {'isites': [], 'fractions': [], 'csms': []}
allces[ce_dict['ce_symbol']]['isites'].append(isite)
allces[ce_dict['ce_symbol']]['fractions'].append(ce_dict['ce_fraction'])
allces[ce_dict['ce_symbol']]['csms'].append(ce_dict['csm'])
return allces
def get_statistics(self, statistics_fields=DEFAULT_STATISTICS_FIELDS, bson_compatible=False):
if self.statistics_dict is None:
self.setup_statistic_lists()
if statistics_fields == 'ALL':
statistics_fields = [key for key in self.statistics_dict]
if bson_compatible:
dd = jsanitize({field: self.statistics_dict[field] for field in statistics_fields})
else:
dd = {field: self.statistics_dict[field] for field in statistics_fields}
return dd
def contains_only_one_anion_atom(self, anion_atom):
return (len(self.statistics_dict['anion_atom_list']) == 1 and
anion_atom in self.statistics_dict['anion_atom_list'])
def contains_only_one_anion(self, anion):
return len(self.statistics_dict['anion_list']) == 1 and anion in self.statistics_dict['anion_list']
def site_contains_environment(self, isite, ce_symbol):
if self.coordination_environments[isite] is None:
return False
return ce_symbol in [ce_dict['ce_symbol'] for ce_dict in self.coordination_environments[isite]]
def site_has_clear_environment(self, isite, conditions=None):
if self.coordination_environments[isite] is None:
raise ValueError('Coordination environments have not been determined for site {:d}'.format(isite))
if conditions is None:
return len(self.coordination_environments[isite]) == 1
ce = max(self.coordination_environments[isite], key=lambda x: x['ce_fraction'])
for condition in conditions:
target = condition['target']
if target == 'ce_fraction':
if ce[target] < condition['minvalue']:
return False
elif target == 'csm':
if ce[target] > condition['maxvalue']:
return False
elif target == 'number_of_ces':
if ce[target] > condition['maxnumber']:
return False
else:
raise ValueError('Target "{}" for condition of clear environment is not allowed'.format(target))
return True
def structure_has_clear_environments(self, conditions=None, skip_none=True, skip_empty=False):
for isite in range(len(self.structure)):
if self.coordination_environments[isite] is None:
if skip_none:
continue
else:
return False
if len(self.coordination_environments[isite]) == 0:
if skip_empty:
continue
else:
return False
if not self.site_has_clear_environment(isite=isite, conditions=conditions):
return False
return True
def clear_environments(self, conditions=None):
clear_envs_list = set()
for isite in range(len(self.structure)):
if self.coordination_environments[isite] is None:
continue
if len(self.coordination_environments[isite]) == 0:
continue
if self.site_has_clear_environment(isite=isite, conditions=conditions):
ce = max(self.coordination_environments[isite], key=lambda x: x['ce_fraction'])
clear_envs_list.add(ce['ce_symbol'])
return list(clear_envs_list)
def structure_contains_atom_environment(self, atom_symbol, ce_symbol):
"""
Checks whether the structure contains a given atom in a given environment
:param atom_symbol: Symbol of the atom
:param ce_symbol: Symbol of the coordination environment
:return: True if the coordination environment is found, False otherwise
"""
for isite, site in enumerate(self.structure):
            if (Element(atom_symbol) in site.species.element_composition and
                    self.site_contains_environment(isite, ce_symbol)):
return True
return False
@property
def uniquely_determines_coordination_environments(self):
"""
True if the coordination environments are uniquely determined.
"""
return self.strategy.uniquely_determines_coordination_environments
def __eq__(self, other):
"""
Equality method that checks if the LightStructureEnvironments object is equal to another
LightStructureEnvironments object. Two LightStructureEnvironments objects are equal if the strategy used
is the same, if the structure is the same, if the valences used in the strategies are the same, if the
coordination environments and the neighbours determined by the strategy are the same
:param other: LightStructureEnvironments object to compare with
:return: True if both objects are equal, False otherwise
"""
is_equal = (self.strategy == other.strategy and
self.structure == other.structure and
self.coordination_environments == other.coordination_environments and
self.valences == other.valences and
self.neighbors_sets == other.neighbors_sets)
this_sites = [ss['site'] for ss in self._all_nbs_sites]
other_sites = [ss['site'] for ss in other._all_nbs_sites]
this_indices = [ss['index'] for ss in self._all_nbs_sites]
other_indices = [ss['index'] for ss in other._all_nbs_sites]
return (is_equal and this_sites == other_sites and this_indices == other_indices)
def __ne__(self, other):
return not self == other
def as_dict(self):
"""
Bson-serializable dict representation of the LightStructureEnvironments object.
:return: Bson-serializable dict representation of the LightStructureEnvironments object.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"strategy": self.strategy.as_dict(),
"structure": self.structure.as_dict(),
"coordination_environments": self.coordination_environments,
"all_nbs_sites": [{'site': nb_site['site'].as_dict(),
'index': nb_site['index'],
'image_cell': [int(ii) for ii in nb_site['image_cell']]}
for nb_site in self._all_nbs_sites],
"neighbors_sets": [[nb_set.as_dict() for nb_set in site_nb_sets] if site_nb_sets is not None else None
for site_nb_sets in self.neighbors_sets],
"valences": self.valences}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the LightStructureEnvironments object from a dict representation of the
LightStructureEnvironments created using the as_dict method.
:param d: dict representation of the LightStructureEnvironments object
:return: LightStructureEnvironments object
"""
dec = MontyDecoder()
structure = dec.process_decoded(d['structure'])
all_nbs_sites = []
for nb_site in d['all_nbs_sites']:
site = dec.process_decoded(nb_site['site'])
if 'image_cell' in nb_site:
image_cell = np.array(nb_site['image_cell'], np.int)
else:
diff = site.frac_coords - structure[nb_site['index']].frac_coords
rounddiff = np.round(diff)
if not np.allclose(diff, rounddiff):
                    raise ValueError('Weird, the difference between a neighbor site and its image in the '
                                     'unit cell is not an integer number of lattice vectors ...')
image_cell = np.array(rounddiff, np.int)
all_nbs_sites.append({'site': site,
'index': nb_site['index'],
'image_cell': image_cell})
neighbors_sets = [[cls.NeighborsSet.from_dict(dd=nb_set, structure=structure,
all_nbs_sites=all_nbs_sites)
for nb_set in site_nb_sets] if site_nb_sets is not None else None
for site_nb_sets in d['neighbors_sets']]
return cls(strategy=dec.process_decoded(d['strategy']),
coordination_environments=d['coordination_environments'],
all_nbs_sites=all_nbs_sites,
neighbors_sets=neighbors_sets,
structure=structure,
valences=d['valences'])
class ChemicalEnvironments(MSONable):
"""
Class used to store all the information about the chemical environment of a given site for a given list of
coordinated neighbours (internally called "cn_map")
"""
def __init__(self, coord_geoms=None):
"""
Initializes the ChemicalEnvironments object containing all the information about the chemical
environment of a given site
:param coord_geoms: coordination geometries to be added to the chemical environment.
"""
if coord_geoms is None:
self.coord_geoms = {}
else:
raise NotImplementedError('Constructor for ChemicalEnvironments with the coord_geoms argument is not'
'yet implemented')
def __getitem__(self, mp_symbol):
        if mp_symbol not in self.coord_geoms:
raise IndexError()
return self.coord_geoms[mp_symbol]
def __len__(self):
"""
Returns the number of coordination geometries in this ChemicalEnvironments object
:return: Number of coordination geometries in this ChemicalEnvironments object
"""
return len(self.coord_geoms)
def __iter__(self):
for cg, cg_dict in self.coord_geoms.items():
yield (cg, cg_dict)
def minimum_geometry(self, symmetry_measure_type=None, max_csm=None):
"""
        Returns the geometry with the minimum continuous symmetry measure of this ChemicalEnvironments
        :param symmetry_measure_type: Type of continuous symmetry measure to be used (csm_wcs_ctwcc if None)
        :param max_csm: If not None, None is returned when the minimum csm is larger than max_csm
        :return: tuple (mp_symbol, cg_dict) with mp_symbol being the geometry with the minimum continuous symmetry
                 measure and cg_dict the dict of information stored for this geometry (including the symmetry
                 measures), or None if this ChemicalEnvironments object contains no coordination geometry
"""
if len(self.coord_geoms) == 0:
return None
cglist = [cg for cg in self.coord_geoms]
if symmetry_measure_type is None:
csms = np.array([self.coord_geoms[cg]['other_symmetry_measures']['csm_wcs_ctwcc'] for cg in cglist])
else:
csms = np.array([self.coord_geoms[cg]['other_symmetry_measures'][symmetry_measure_type] for cg in cglist])
csmlist = [self.coord_geoms[cg] for cg in cglist]
imin = np.argmin(csms)
if max_csm is not None:
            if csms[imin] > max_csm:
return None
return cglist[imin], csmlist[imin]
def minimum_geometries(self, n=None, symmetry_measure_type=None, max_csm=None):
"""
Returns a list of geometries with increasing continuous symmetry measure in this ChemicalEnvironments object
        :param n: Number of geometries to be included in the list (all geometries if None)
        :param symmetry_measure_type: Type of continuous symmetry measure to be used (csm_wcs_ctwcc if None)
        :param max_csm: If not None, only geometries with a csm lower than or equal to max_csm are returned
        :return: list of (mp_symbol, cg_dict) tuples sorted by increasing continuous symmetry measure
"""
cglist = [cg for cg in self.coord_geoms]
if symmetry_measure_type is None:
csms = np.array([self.coord_geoms[cg]['other_symmetry_measures']['csm_wcs_ctwcc'] for cg in cglist])
else:
csms = np.array([self.coord_geoms[cg]['other_symmetry_measures'][symmetry_measure_type] for cg in cglist])
csmlist = [self.coord_geoms[cg] for cg in cglist]
isorted = np.argsort(csms)
if max_csm is not None:
if n is None:
return [(cglist[ii], csmlist[ii]) for ii in isorted if csms[ii] <= max_csm]
else:
return [(cglist[ii], csmlist[ii]) for ii in isorted[:n] if csms[ii] <= max_csm]
else:
if n is None:
return [(cglist[ii], csmlist[ii]) for ii in isorted]
else:
return [(cglist[ii], csmlist[ii]) for ii in isorted[:n]]
def add_coord_geom(self, mp_symbol, symmetry_measure, algo='UNKNOWN', permutation=None, override=False,
local2perfect_map=None, perfect2local_map=None, detailed_voronoi_index=None,
other_symmetry_measures=None, rotation_matrix=None, scaling_factor=None):
"""
Adds a coordination geometry to the ChemicalEnvironments object
:param mp_symbol: Symbol (internal) of the coordination geometry added
:param symmetry_measure: Symmetry measure of the coordination geometry added
:param algo: Algorithm used for the search of the coordination geometry added
:param permutation: Permutation of the neighbors that leads to the csm stored
        :param override: If set to True, the coordination geometry will override the existing one if present
        :raise: ChemenvError if the coordination geometry is already added and override is set to False
"""
if not allcg.is_a_valid_coordination_geometry(mp_symbol=mp_symbol):
raise ChemenvError(self.__class__,
'add_coord_geom',
'Coordination geometry with mp_symbol "{mp}" is not valid'
.format(mp=mp_symbol))
if mp_symbol in list(self.coord_geoms.keys()) and not override:
raise ChemenvError(self.__class__,
"add_coord_geom",
"This coordination geometry is already present and override is set to False")
else:
self.coord_geoms[mp_symbol] = {'symmetry_measure': float(symmetry_measure), 'algo': algo,
'permutation': [int(i) for i in permutation],
'local2perfect_map': local2perfect_map,
'perfect2local_map': perfect2local_map,
'detailed_voronoi_index': detailed_voronoi_index,
'other_symmetry_measures': other_symmetry_measures,
'rotation_matrix': rotation_matrix,
'scaling_factor': scaling_factor}
def __str__(self):
"""
Returns a string representation of the ChemicalEnvironments object
:return: String representation of the ChemicalEnvironments object
"""
out = 'Chemical environments object :\n'
if len(self.coord_geoms) == 0:
out += ' => No coordination in it <=\n'
return out
for key in self.coord_geoms.keys():
mp_symbol = key
break
cn = symbol_cn_mapping[mp_symbol]
out += ' => Coordination {} <=\n'.format(cn)
mp_symbols = list(self.coord_geoms.keys())
csms_wcs = [self.coord_geoms[mp_symbol]['other_symmetry_measures']['csm_wcs_ctwcc'] for mp_symbol in mp_symbols]
icsms_sorted = np.argsort(csms_wcs)
mp_symbols = [mp_symbols[ii] for ii in icsms_sorted]
for mp_symbol in mp_symbols:
csm_wcs = self.coord_geoms[mp_symbol]['other_symmetry_measures']['csm_wcs_ctwcc']
csm_wocs = self.coord_geoms[mp_symbol]['other_symmetry_measures']['csm_wocs_ctwocc']
out += ' - {}\n'.format(mp_symbol)
out += ' csm1 (with central site) : {}'.format(csm_wcs)
out += ' csm2 (without central site) : {}'.format(csm_wocs)
out += ' algo : {}'.format(self.coord_geoms[mp_symbol]['algo'])
out += ' perm : {}\n'.format(self.coord_geoms[mp_symbol]['permutation'])
out += ' local2perfect : {}\n'.format(str(self.coord_geoms[mp_symbol]['local2perfect_map']))
out += ' perfect2local : {}\n'.format(str(self.coord_geoms[mp_symbol]['perfect2local_map']))
return out
def is_close_to(self, other, rtol=0.0, atol=1e-8):
if set(self.coord_geoms.keys()) != set(other.coord_geoms.keys()):
return False
for mp_symbol, cg_dict_self in self.coord_geoms.items():
cg_dict_other = other[mp_symbol]
other_csms_self = cg_dict_self['other_symmetry_measures']
other_csms_other = cg_dict_other['other_symmetry_measures']
for csmtype in ['csm_wcs_ctwcc', 'csm_wcs_ctwocc', 'csm_wcs_csc',
'csm_wocs_ctwcc', 'csm_wocs_ctwocc', 'csm_wocs_csc']:
if not np.isclose(other_csms_self[csmtype], other_csms_other[csmtype], rtol=rtol, atol=atol):
return False
return True
def __eq__(self, other):
"""
Equality method that checks if the ChemicalEnvironments object is equal to another ChemicalEnvironments
object.
:param other: ChemicalEnvironments object to compare with
:return: True if both objects are equal, False otherwise
"""
if set(self.coord_geoms.keys()) != set(other.coord_geoms.keys()):
return False
for mp_symbol, cg_dict_self in self.coord_geoms.items():
cg_dict_other = other.coord_geoms[mp_symbol]
if cg_dict_self['symmetry_measure'] != cg_dict_other['symmetry_measure']:
return False
if cg_dict_self['algo'] != cg_dict_other['algo']:
return False
if cg_dict_self['permutation'] != cg_dict_other['permutation']:
return False
if cg_dict_self['detailed_voronoi_index'] != cg_dict_other['detailed_voronoi_index']:
return False
other_csms_self = cg_dict_self['other_symmetry_measures']
other_csms_other = cg_dict_other['other_symmetry_measures']
for csmtype in ['csm_wcs_ctwcc', 'csm_wcs_ctwocc', 'csm_wcs_csc',
'csm_wocs_ctwcc', 'csm_wocs_ctwocc', 'csm_wocs_csc']:
if other_csms_self[csmtype] != other_csms_other[csmtype]:
return False
return True
def __ne__(self, other):
return not self == other
def as_dict(self):
"""
Returns a dictionary representation of the ChemicalEnvironments object
:return:
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"coord_geoms": jsanitize(self.coord_geoms)}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the ChemicalEnvironments object from a dict representation of the ChemicalEnvironments created
using the as_dict method.
:param d: dict representation of the ChemicalEnvironments object
:return: ChemicalEnvironments object
"""
ce = cls()
for cg in d['coord_geoms'].keys():
if d['coord_geoms'][cg]['local2perfect_map'] is None:
l2p_map = None
else:
l2p_map = {int(key): int(val) for key, val in d['coord_geoms'][cg]['local2perfect_map'].items()}
if d['coord_geoms'][cg]['perfect2local_map'] is None:
p2l_map = None
else:
p2l_map = {int(key): int(val) for key, val in d['coord_geoms'][cg]['perfect2local_map'].items()}
if ('other_symmetry_measures' in d['coord_geoms'][cg] and
d['coord_geoms'][cg]['other_symmetry_measures'] is not None):
other_csms = d['coord_geoms'][cg]['other_symmetry_measures']
else:
other_csms = None
ce.add_coord_geom(cg,
d['coord_geoms'][cg]['symmetry_measure'],
d['coord_geoms'][cg]['algo'],
permutation=d['coord_geoms'][cg]['permutation'],
local2perfect_map=l2p_map,
perfect2local_map=p2l_map,
detailed_voronoi_index=d['coord_geoms'][cg]['detailed_voronoi_index'],
other_symmetry_measures=other_csms,
rotation_matrix=d['coord_geoms'][cg]['rotation_matrix'],
scaling_factor=d['coord_geoms'][cg]['scaling_factor'])
return ce | mit |
xodus7/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 28 | 5024 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
from tensorflow.python.util.deprecation import deprecated
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
@deprecated(None, 'Please use tf.estimator.inputs.pandas_input_fn')
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
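# Illustrative sketch of the difference noted in the docstring above: here
# ``shuffle`` defaults to True, while the core implementation expects it to be
# passed explicitly (``df``/``labels`` stand for a pandas DataFrame/Series and
# are assumptions, not part of this module).
#   input_fn = pandas_input_fn(x=df, y=labels)                      # shuffle=True by default
#   input_fn = core_pandas_input_fn(x=df, y=labels, shuffle=True)   # core form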
@deprecated(None, 'Please access pandas data directly.')
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
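def _extract_pandas_data_example():
  """Hedged usage sketch (hypothetical frame, not part of this module): int,
  float and bool columns come back as a float ndarray, while a string column
  would raise the ValueError documented above."""
  if not HAS_PANDAS:
    return None
  frame = pd.DataFrame({'a': [1, 2], 'b': [0.5, 1.5], 'c': [True, False]})
  return extract_pandas_data(frame)  # ndarray of shape (2, 3), dtype float64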
@deprecated(None, 'Please access pandas data directly.')
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
@deprecated(None, 'Please access pandas data directly.')
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
| apache-2.0 |
alwayskidd/LRB | src/core/examples/sample-rng-plot.py | 188 | 1246 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-2.0 |
belteshassar/cartopy | lib/cartopy/examples/feature_creation.py | 2 | 1149 | __tags__ = ['Lines and polygons']
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from matplotlib.offsetbox import AnchoredText
def main():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent([80, 170, -45, 30])
# Put a background image on for nice sea rendering.
ax.stock_img()
# Create a feature for States/Admin 1 regions at 1:50m from Natural Earth
states_provinces = cfeature.NaturalEarthFeature(
category='cultural',
name='admin_1_states_provinces_lines',
scale='50m',
facecolor='none')
SOURCE = 'Natural Earth'
LICENSE = 'public domain'
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(states_provinces, edgecolor='gray')
# Add a text annotation for the license information to the
# the bottom right corner.
text = AnchoredText(r'$\mathcircled{{c}}$ {}; license: {}'
''.format(SOURCE, LICENSE),
loc=4, prop={'size': 12}, frameon=True)
ax.add_artist(text)
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
siutanwong/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in case of a simple linear regression which means that it can tolerate
arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
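# Illustrative note on the ``max_subpopulation`` parameter discussed in the
# module docstring: for larger problems the number of candidate subsets can be
# capped, trading exactness for runtime (the value below is only an example).
#   TheilSenRegressor(max_subpopulation=10000, random_state=42)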
| bsd-3-clause |
ArtsiomCh/tensorflow | tensorflow/python/estimator/inputs/pandas_io.py | 86 | 4503 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
try:
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-import
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=None,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object. `None` if absent.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
num_threads: Integer, number of threads used for reading and enqueueing. In
order to have predicted and repeatable order of reading and enqueueing,
such as in prediction and evaluation mode, `num_threads` should be 1.
target_column: str, name to give the target column `y`.
Returns:
Function, that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
TypeError: `shuffle` is not bool.
"""
if not HAS_PANDAS:
raise TypeError(
'pandas_input_fn should not be called without pandas installed')
if not isinstance(shuffle, bool):
raise TypeError('shuffle must be explicitly set as boolean; '
'got {}'.format(shuffle))
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
def input_fn():
"""Pandas input function."""
queue = feeding_functions._enqueue_data( # pylint: disable=protected-access
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
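def _pandas_input_fn_example():
  """Hedged usage sketch (hypothetical feature names and values, not part of
  this module): build an input_fn from a toy DataFrame/Series for training."""
  if not HAS_PANDAS:
    return None
  x = pd.DataFrame({'sqft': [800.0, 1200.0, 1500.0]})
  y = pd.Series([100.0, 180.0, 210.0])
  train_input_fn = pandas_input_fn(x=x, y=y, batch_size=2, num_epochs=None,
                                   shuffle=True)
  # A tf.estimator.Estimator would then consume it, roughly:
  #   estimator.train(input_fn=train_input_fn, steps=100)
  return train_input_fn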
| apache-2.0 |
nicepear/machine-learning | logRegres.py | 1 | 4017 | '''
Created on Oct 27, 2015
Logistic Regression Working Module
@author: Gu
'''
from numpy import *
def loadDataSet():
dataMat = []; labelMat = []
fr = open('testSet.txt')
for line in fr.readlines():
lineArr = line.strip().split()
dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
labelMat.append(int(lineArr[2]))
return dataMat,labelMat
def sigmoid(inX):
return 1.0/(1+exp(-inX))
def gradAscent(dataMatIn, classLabels):
dataMatrix = mat(dataMatIn) #convert to NumPy matrix
labelMat = mat(classLabels).transpose() #convert to NumPy matrix
m,n = shape(dataMatrix)
alpha = 0.001
maxCycles = 500
weights = ones((n,1))
for k in range(maxCycles): #heavy on matrix operations
h = sigmoid(dataMatrix*weights) #matrix mult
error = (labelMat - h) #vector subtraction
weights = weights + alpha * dataMatrix.transpose()* error #matrix mult
return weights
def plotBestFit(weights):
import matplotlib.pyplot as plt
dataMat,labelMat=loadDataSet()
dataArr = array(dataMat)
n = shape(dataArr)[0]
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []
for i in range(n):
if int(labelMat[i])== 1:
xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
else:
xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
ax.scatter(xcord2, ycord2, s=30, c='green')
x = arange(-3.0, 3.0, 0.1)
y = (-weights[0]-weights[1]*x)/weights[2]
ax.plot(x, y)
plt.xlabel('X1'); plt.ylabel('X2');
plt.show()
def stocGradAscent0(dataMatrix, classLabels):
m,n = shape(dataMatrix)
alpha = 0.01
weights = ones(n) #initialize to all ones
for i in range(m):
h = sigmoid(sum(dataMatrix[i]*weights))
error = classLabels[i] - h
weights = weights + alpha * error * dataMatrix[i]
return weights
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
m,n = shape(dataMatrix)
weights = ones(n) #initialize to all ones
for j in range(numIter):
        dataIndex = list(range(m))   #use a list so chosen indices can be removed below
        for i in range(m):
            alpha = 4/(1.0+j+i)+0.0001    #alpha decreases with iteration, does not
            randIndex = int(random.uniform(0,len(dataIndex)))#go to 0 because of the constant
            sampleIndex = dataIndex[randIndex]   #pick among the remaining rows, without replacement
            h = sigmoid(sum(dataMatrix[sampleIndex]*weights))
            error = classLabels[sampleIndex] - h
            weights = weights + alpha * error * dataMatrix[sampleIndex]
            del(dataIndex[randIndex])
return weights
def classifyVector(inX, weights):
prob = sigmoid(sum(inX*weights))
if prob > 0.5: return 1.0
else: return 0.0
def colicTest():
frTrain = open('horseColicTraining.txt'); frTest = open('horseColicTest.txt')
trainingSet = []; trainingLabels = []
for line in frTrain.readlines():
currLine = line.strip().split('\t')
lineArr =[]
for i in range(21):
lineArr.append(float(currLine[i]))
trainingSet.append(lineArr)
trainingLabels.append(float(currLine[21]))
trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 1000)
errorCount = 0; numTestVec = 0.0
for line in frTest.readlines():
numTestVec += 1.0
currLine = line.strip().split('\t')
lineArr =[]
for i in range(21):
lineArr.append(float(currLine[i]))
if int(classifyVector(array(lineArr), trainWeights))!= int(currLine[21]):
errorCount += 1
errorRate = (float(errorCount)/numTestVec)
print ("the error rate of this test is: %f" % errorRate)
return errorRate
def multiTest():
numTests = 10; errorSum=0.0
for k in range(numTests):
errorSum += colicTest()
print ("after %d iterations the average error rate is: %f" % (numTests, errorSum/float(numTests)))
weights=ones((3,1))
print(weights)
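# Illustrative usage sketch (assumes the accompanying testSet.txt data file is present):
def demoGradAscentPlot():
    dataArr, labelMat = loadDataSet()
    weights = gradAscent(dataArr, labelMat)          #weights comes back as a numpy matrix
    plotBestFit(weights.getA())                      #getA() converts it to an ndarray for plotting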
| mit |
linebp/pandas | pandas/util/_doctools.py | 9 | 6776 | import numpy as np
import pandas as pd
import pandas.compat as compat
class TablePlotter(object):
"""
Layout some DataFrames in vertical/horizontal layout for explanation.
Used in merging.rst
"""
def __init__(self, cell_width=0.37, cell_height=0.25, font_size=7.5):
self.cell_width = cell_width
self.cell_height = cell_height
self.font_size = font_size
def _shape(self, df):
"""Calcurate table chape considering index levels"""
row, col = df.shape
return row + df.columns.nlevels, col + df.index.nlevels
def _get_cells(self, left, right, vertical):
"""Calcurate appropriate figure size based on left and right data"""
if vertical:
            # calculate required number of cells
vcells = max(sum([self._shape(l)[0] for l in left]),
self._shape(right)[0])
hcells = (max([self._shape(l)[1] for l in left]) +
self._shape(right)[1])
else:
vcells = max([self._shape(l)[0] for l in left] +
[self._shape(right)[0]])
hcells = sum([self._shape(l)[1] for l in left] +
[self._shape(right)[1]])
return hcells, vcells
def plot(self, left, right, labels=None, vertical=True):
"""
Plot left / right DataFrames in specified layout.
Parameters
----------
left : list of DataFrames before operation is applied
right : DataFrame of operation result
labels : list of str to be drawn as titles of left DataFrames
vertical : bool
If True, use vertical layout. If False, use horizontal layout.
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
if not isinstance(left, list):
left = [left]
left = [self._conv(l) for l in left]
right = self._conv(right)
hcells, vcells = self._get_cells(left, right, vertical)
if vertical:
figsize = self.cell_width * hcells, self.cell_height * vcells
else:
# include margin for titles
figsize = self.cell_width * hcells, self.cell_height * vcells
fig = plt.figure(figsize=figsize)
if vertical:
gs = gridspec.GridSpec(len(left), hcells)
# left
max_left_cols = max([self._shape(l)[1] for l in left])
max_left_rows = max([self._shape(l)[0] for l in left])
for i, (l, label) in enumerate(zip(left, labels)):
ax = fig.add_subplot(gs[i, 0:max_left_cols])
self._make_table(ax, l, title=label,
height=1.0 / max_left_rows)
# right
ax = plt.subplot(gs[:, max_left_cols:])
self._make_table(ax, right, title='Result', height=1.05 / vcells)
fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
else:
max_rows = max([self._shape(df)[0] for df in left + [right]])
height = 1.0 / np.max(max_rows)
gs = gridspec.GridSpec(1, hcells)
# left
i = 0
for l, label in zip(left, labels):
sp = self._shape(l)
ax = fig.add_subplot(gs[0, i:i + sp[1]])
self._make_table(ax, l, title=label, height=height)
i += sp[1]
# right
ax = plt.subplot(gs[0, i:])
self._make_table(ax, right, title='Result', height=height)
fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
return fig
def _conv(self, data):
"""Convert each input to appropriate for table outplot"""
if isinstance(data, pd.Series):
if data.name is None:
data = data.to_frame(name='')
else:
data = data.to_frame()
data = data.fillna('NaN')
return data
def _insert_index(self, data):
# insert is destructive
data = data.copy()
idx_nlevels = data.index.nlevels
if idx_nlevels == 1:
data.insert(0, 'Index', data.index)
else:
for i in range(idx_nlevels):
data.insert(i, 'Index{0}'.format(i),
data.index._get_level_values(i))
col_nlevels = data.columns.nlevels
if col_nlevels > 1:
col = data.columns._get_level_values(0)
values = [data.columns._get_level_values(i).values
for i in range(1, col_nlevels)]
col_df = pd.DataFrame(values)
data.columns = col_df.columns
data = pd.concat([col_df, data])
data.columns = col
return data
def _make_table(self, ax, df, title, height=None):
if df is None:
ax.set_visible(False)
return
import pandas.plotting as plotting
idx_nlevels = df.index.nlevels
col_nlevels = df.columns.nlevels
# must be convert here to get index levels for colorization
df = self._insert_index(df)
tb = plotting.table(ax, df, loc=9)
tb.set_fontsize(self.font_size)
if height is None:
height = 1.0 / (len(df) + 1)
props = tb.properties()
for (r, c), cell in compat.iteritems(props['celld']):
if c == -1:
cell.set_visible(False)
elif r < col_nlevels and c < idx_nlevels:
cell.set_visible(False)
elif r < col_nlevels or c < idx_nlevels:
cell.set_facecolor('#AAAAAA')
cell.set_height(height)
ax.set_title(title, size=self.font_size)
ax.axis('off')
if __name__ == "__main__":
import matplotlib.pyplot as plt
p = TablePlotter()
df1 = pd.DataFrame({'A': [10, 11, 12],
'B': [20, 21, 22],
'C': [30, 31, 32]})
df2 = pd.DataFrame({'A': [10, 12],
'C': [30, 32]})
p.plot([df1, df2], pd.concat([df1, df2]),
labels=['df1', 'df2'], vertical=True)
plt.show()
df3 = pd.DataFrame({'X': [10, 12],
'Z': [30, 32]})
p.plot([df1, df3], pd.concat([df1, df3], axis=1),
labels=['df1', 'df2'], vertical=False)
plt.show()
idx = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B'), (1, 'C'),
(2, 'A'), (2, 'B'), (2, 'C')])
col = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B')])
df3 = pd.DataFrame({'v1': [1, 2, 3, 4, 5, 6],
'v2': [5, 6, 7, 8, 9, 10]},
index=idx)
df3.columns = col
p.plot(df3, df3, labels=['df3'])
plt.show()
| bsd-3-clause |
Brett777/Predict-Churn | DeployChurnEnsembleModel.py | 1 | 2262 | import os
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import h2o
import numpy as np
import pandas as pd
from tabulate import tabulate
# initialize the model scoring server
h2o.init(nthreads=1,max_mem_size=1, start_h2o=True, strict_version_check = False)
def churn_predict(State,AccountLength,AreaCode,Phone,IntlPlan,VMailPlan,VMailMessage,DayMins,DayCalls,DayCharge,EveMins,EveCalls,EveCharge,NightMins,NightCalls,NightCharge,IntlMins,IntlCalls,IntlCharge,CustServCalls):
# connect to the model scoring service
h2o.connect()
# open the downloaded model
ChurnPredictor = h2o.load_model(path='GBM-RF-ensemble')
# define a feature vector to evaluate with the model
newData = pd.DataFrame({'State' : State,
'Account Length' : AccountLength,
'Area Code' : AreaCode,
'Phone' : Phone,
'Int\'l Plan' : IntlPlan,
'VMail Plan' : VMailPlan,
'VMail Message' : VMailMessage,
'Day Mins' : DayMins,
'Day Calls' : DayCalls,
'Day Charge' : DayCharge,
'Eve Mins' : EveMins,
'Eve Calls' : EveCalls,
'Eve Charge' : EveCharge,
'Night Mins' : NightMins,
'Night Calls' : NightCalls,
'Night Charge' : NightCharge,
'Intl Mins' :IntlMins,
'Intl Calls' : IntlCalls,
'Intl Charge' : IntlCharge,
'CustServ Calls' : CustServCalls}, index=[0])
# evaluate the feature vector using the model
predictions = ChurnPredictor.predict(h2o.H2OFrame(newData))
predictionsOut = h2o.as_list(predictions, use_pandas=False)
prediction = predictionsOut[1][0]
probabilityChurn = predictionsOut[1][1]
probabilityRetain = predictionsOut[1][2]
return "Prediction: " + str(prediction) + " |Probability to Churn: " + str(probabilityChurn) + " |Probability to Retain: " + str(probabilityRetain) | mit |
stuartsale/LRF_gen | LRF_gen/plots.py | 1 | 1548 | import numpy
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors
from mpl_toolkits.axes_grid1 import ImageGrid, Divider
def set_plot_params():
fig_width_pt = 240.0 # Get this from LaTeX using \showthe\columnwidth
inches_per_pt = 1.0/72.27 # Convert pt to inch
golden_mean = (math.sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_width = fig_width_pt*inches_per_pt # width in inches
fig_height = fig_width*golden_mean # height in inches
fig_size = [fig_width, fig_height]
params = {'backend': 'ps', 'axes.labelsize': 10, 'text.fontsize': 10,
'legend.fontsize': 10, 'xtick.labelsize': 10,
'ytick.labelsize': 10, 'text.usetex': True,
'figure.figsize': fig_size, 'font.weight': "bolder",
'ytick.major.pad': 10, 'xtick.major.pad': 10,
'axes.titlesize': 10, 'ps.distiller.res': 24000}
return params
def plot_slice(cube, index, filename):
mpl.rcParams.update(set_plot_params())
fig = plt.figure()
ax = fig.add_subplot(111)
plt.subplots_adjust(left=0.1, right=0.9, bottom=0.22, top=0.95)
mean = numpy.mean(numpy.log(cube[index, :, :]))
std = numpy.std(numpy.log(cube[index, :, :]))
plate = ((numpy.log(cube[index, :, :])-mean)/std)
cax = ax.imshow(plate, vmin=-3, vmax=3.)
cbar = fig.colorbar(cax, ticks=[-3, -2, -1, 0, 1, 2, 3],)
cbar.set_label(r'$\ln \rho$')
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.savefig(filename)
| bsd-3-clause |
jonathansick/pySPS | demo/snapshot_test.py | 1 | 10519 | #!/usr/bin/env python
# encoding: utf-8
"""
Tests the iso-age library
History
-------
2011-10-02 - Created by Jonathan Sick
"""
__all__ = ['']
import os
import tables
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import snapshotlib
from fspsq import ParameterSet
from mclib import MagTableDef
from mclib import get_metallicity_LUT
from griddata import griddata
def main():
#tauzGrid = TauZGrid("tauz", dbname='fsps')
#tauzGrid.reset()
#tauzGrid.generate_grid()
#tauzGrid.compute_models(nThreads=2, maxN=10, clean=True)
#tauzGrid.plot_color("tauzgrid", 'MegaCam_g', 'MegaCam_i')
mclib = MonteCarloLibrary('mc.6')
#mclib.reset()
#mclib.define_samples(n=50000)
#mclib.compute_models(nThreads=8, maxN=100, clean=True)
#mclib.create_mag_table("mc6.h5")
mclib.bin_cc_index(("MegaCam_i","TMASS_Ks"),("MegaCam_g","MegaCam_i"),
"mc6.h5")
mclib.plot_cc_lut("mc6.h5", r"$i^\prime-K_s$", r"$g^\prime-i^\prime$")
class TauZGrid(snapshotlib.SnapshotLibrary):
"""Build a grid of Zs and Taus. Observe each at the age of the universe."""
def __init__(self, libname, dbname="fsps", age=13.7):
super(TauZGrid, self).__init__(libname, dbname=dbname, age=age)
def generate_grid(self):
zmets = range(1,23,4)
#zmets = [3, 5, 10, 15, 20]
#taus = np.arange(50.,100.,0.5)
#taus = [0.5,1., 5., 10.]
taus = np.linspace(0.1,100,10.)
for zmet in zmets:
for tau in taus:
pset = ParameterSet(None, sfh=1,
zmet=int(zmet), tau=float(tau), tage=13.7,
const=0.5)
self.register_pset(pset)
def plot_color(self, plotPath, c1Name, c2Name):
"""Plots colour vs tau for each metallicity."""
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
zmets = self.collection.distinct('pset.zmet')
zmets.sort()
print zmets
for zmet in zmets:
docs = self.collection.find({"pset.zmet":zmet})
taus = []
c1 = []
c2 = []
for doc in docs:
taus.append(doc['pset']['tau'])
c1.append(doc['obs'][c1Name])
c2.append(doc['obs'][c2Name])
taus = np.array(taus)
sort = np.argsort(taus)
taus = taus[sort]
c1 = np.array(c1)
c2 = np.array(c2)
c1 = c1[sort]
c2 = c2[sort]
c = c1 - c2
ax.plot(taus, c, '-')
ax.set_xlabel(r"$\tau$")
ax.set_ylabel("%s - %s" % (c1Name,c2Name))
fig.savefig(plotPath+".pdf", format='pdf')
class MonteCarloLibrary(snapshotlib.SnapshotLibrary):
"""docstring for MonteCarloLibrary"""
def __init__(self, libname, dbname='fsps', age=13.7):
super(MonteCarloLibrary, self).__init__(libname,dbname=dbname,age=age)
def define_samples(self, n=50000):
"""Define the set of models."""
for i in xrange(n):
pset = ParameterSet(None, # automatically create a name
sfh=1,
imf_type=1, # Chabrier 2003
dust_type=2, # Calzetti 2000 attenuation curve
zmet=int(self._sample_zmet()),
tau=float(self._sample_tau()),
const=float(self._sample_const()),
sf_start=float(self._sample_sf_start()),
fburst=float(self._sample_fburst()),
tburst=float(self._sample_tburst()),
dust2=float(self._sample_dust2()),
tage=13.7
)
self.register_pset(pset)
def _sample_zmet(self):
"""Returns a random metallicity"""
return np.random.randint(1,23)
def _sample_tau(self):
"""Returns a random e-folding of SFR"""
return np.random.uniform(0.1, 100.)
def _sample_const(self):
"""Returns fraction of mass formed as a constant mode of SF"""
return np.random.uniform(0.1,1.)
def _sample_sf_start(self):
"""Start time of SFH in Gyr"""
return np.random.uniform(0.5,10.)
def _sample_fburst(self):
"""Fraction of mass formed in an instantaneous burst of SF."""
return np.random.uniform(0.,0.9)
def _sample_tburst(self):
"""Time of the burst after the BB."""
return np.random.uniform(1.5,13.5)
def _sample_dust2(self):
"""Optical depth of ambient ISM."""
return np.random.uniform(0.1,1.)
def create_mag_table(self, outputPath,
isocType="pdva", specType="basel"):
"""Create an HDF5 table of that describes a set of magnitudes for
stellar population realizations at a defined age. Assumes a
snapshot data set for that age.
"""
if os.path.exists(outputPath): os.remove(outputPath)
title = os.path.splitext(os.path.basename(outputPath))[0]
h5file = tables.openFile(outputPath, mode="w", title=title)
table = h5file.createTable("/", 'mags', MagTableDef, "Mag Model Table")
print h5file
docs = self.collection.find({"compute_complete":True,
"obs": {"$exists": 1}}) # , limit=2
print "working on %i docs to read" % docs.count()
lut = get_metallicity_LUT(isocType, specType)
obsCols = ['mass','lbol','sfr','TMASS_J','TMASS_H',
'TMASS_Ks','MegaCam_u','MegaCam_g','MegaCam_r','MegaCam_i',
'MegaCam_z','GALEX_NUV','GALEX_FUV']
for doc in docs:
print "reading", doc['_id']
obs = doc['obs']
# Append model information (about SFH, dust, etc)
zmet = doc['pset']['zmet']
Z = lut[zmet-1]
# Append to HDF5
table.row['Z'] = Z
table.row['tau'] = doc['pset']['tau']
table.row['age'] = doc['pset']['tage']
for col in obsCols:
table.row[col] = obs[col]
table.row.append()
h5file.flush()
h5file.close()
def bin_cc_index(self, c1I, c2I, h5Path):
"""Bin the mag table into a colour colour diagram.
Produces an color-color table with indexes into models in
the mags HDF5 table.
Parameters
----------
c1I : tuple of two str
Names of the two bands that make the first colour
c2I : tuple of two str
Names of the two bands that make the second colour
"""
h5file = tables.openFile(h5Path, mode='a')
magTable = h5file.root.mags
c1 = np.array([x[c1I[0]]-x[c1I[1]] for x in magTable])
c2 = np.array([x[c2I[0]]-x[c2I[1]] for x in magTable])
mass = np.array([x['mass'] for x in magTable])
logL = np.array([x['lbol'] for x in magTable])
logML = mass - logL
grid, gridN, wherebin = griddata(c1, c2, logML, binsize=0.05,
retbin=True, retloc=True)
print "grid", grid.shape
# Set up the cc table
if 'cc' in h5file.root:
print "cc already exists"
h5file.root.cc._f_remove()
ccDtype = np.dtype([('c1',np.float),('c2',np.float),('xi',np.int),
('yi',np.int),('ml',np.float),('nmodels',np.int)])
ccTable = h5file.createTable("/", 'cc', ccDtype,
"CC Table %s-%s %s-%s" % (c1I[0],c1I[1],c2I[0],c2I[1]))
ny, nx = grid.shape
c1colors = np.arange(c1.min(), c1.max()+0.05, 0.05)
c2colors = np.arange(c2.min(), c2.max()+0.05, 0.05)
print "c1 range:", c1.min(), c1.max()
print "c2 range:", c2.min(), c2.max()
for i in xrange(ny):
for j in xrange(nx):
ccTable.row['c1'] = float(c1colors[j])
ccTable.row['c2'] = float(c2colors[i])
ccTable.row['yi'] = i
ccTable.row['xi'] = j
ccTable.row['ml'] = grid[i,j]
ccTable.row['nmodels'] = int(gridN[i,j])
print gridN[i,j]
ccTable.row.append()
h5file.flush()
h5file.close()
def plot_cc_lut(self, h5Path, xlabel, ylabel):
"""Create the g-i,i-Ks M/L look up table plot."""
h5file = tables.openFile(h5Path, mode='a')
ccTable = h5file.root.cc
c1, c2, xi, yi, ml, n = [],[],[],[],[],[]
for row in ccTable:
c1.append(row['c1'])
c2.append(row['c2'])
xi.append(row['xi'])
yi.append(row['yi'])
ml.append(row['ml'])
n.append(row['nmodels'])
c1 = np.array(c1)
c2 = np.array(c2)
xi = np.array(xi, dtype=np.int)
yi = np.array(yi, dtype=np.int)
print xi
print yi
nx = max(xi)+1
ny = max(yi)+1
# Extent is the physical limits (left, right, bottom, top)
extent = [min(c1), max(c1), min(c2), max(c2)]
print "extent:", extent
lut = np.empty((ny,nx), dtype=np.float)
nImage = np.empty((ny,nx), dtype=np.float)
for i in xrange(len(c1)):
lut[yi[i],xi[i]] = ml[i]
nImage[yi[i],xi[i]] = n[i]
print lut.shape
nImage[nImage == 0] = np.nan
fig = plt.figure(figsize=(4.,5,)) # set width,height in inches
fig.subplots_adjust(left=0.15, bottom=0.1, right=0.85, top=0.99)
ax = fig.add_subplot(211)
im = ax.imshow(lut, cmap=mpl.cm.jet, aspect='equal', extent=extent,
interpolation='nearest', origin='lower')
# Create the colorbar
# see http://matplotlib.sourceforge.net/api/pyplot_api.html#matplotlib.pyplot.colorbar
cbar = fig.colorbar(mappable=im, cax=None, ax=ax, orientation='vertical',
fraction=0.1, pad=0.02, shrink=0.75,)
cbar.set_label(r'$\langle\log M/L_\mathrm{bol}\rangle$')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
axN = fig.add_subplot(212)
im2 = axN.imshow(nImage, cmap=mpl.cm.jet, aspect='equal', extent=extent,
interpolation='nearest', origin='lower')
cbar2 = fig.colorbar(mappable=im2, cax=None, ax=axN, orientation='vertical',
fraction=0.1, pad=0.02, shrink=0.75,)
cbar2.set_label(r'N models')
axN.set_xlabel(xlabel)
axN.set_ylabel(ylabel)
fig.savefig(h5Path+".2.pdf", format="pdf") # can also do "eps", etc.
if __name__ == '__main__':
main()
| bsd-3-clause |
cmorgan/zipline | zipline/utils/factory.py | 17 | 11554 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Factory functions to prepare useful data.
"""
import pytz
import random
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.sources import (SpecificEquityTrades,
DataFrameSource,
DataPanelSource)
from zipline.finance.trading import SimulationParameters
from zipline.finance import trading
from zipline.sources.test_source import create_trade
# For backwards compatibility
from zipline.data.loader import (load_from_yahoo,
load_bars_from_yahoo)
__all__ = ['load_from_yahoo', 'load_bars_from_yahoo']
def create_simulation_parameters(year=2006, start=None, end=None,
capital_base=float("1.0e5"),
num_days=None, load=None,
data_frequency='daily',
emission_rate='daily'):
"""Construct a complete environment with reasonable defaults"""
if start is None:
start = datetime(year, 1, 1, tzinfo=pytz.utc)
if end is None:
if num_days:
trading.environment = trading.TradingEnvironment(load=load)
start_index = trading.environment.trading_days.searchsorted(
start)
end = trading.environment.trading_days[start_index + num_days - 1]
else:
end = datetime(year, 12, 31, tzinfo=pytz.utc)
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=capital_base,
data_frequency=data_frequency,
emission_rate=emission_rate,
)
return sim_params
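# Illustrative sketch (hypothetical values): a TradingEnvironment is only built
# here when num_days is given without an explicit end date.
#   sim_params = create_simulation_parameters(year=2006, num_days=10,
#                                             capital_base=1e4)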
def create_random_simulation_parameters():
trading.environment = trading.TradingEnvironment()
treasury_curves = trading.environment.treasury_curves
for n in range(100):
random_index = random.randint(
0,
len(treasury_curves) - 1
)
start_dt = treasury_curves.index[random_index]
end_dt = start_dt + timedelta(days=365)
now = datetime.utcnow().replace(tzinfo=pytz.utc)
if end_dt <= now:
break
assert end_dt <= now, """
failed to find a suitable daterange after 100 attempts. please double
check treasury and benchmark data in findb, and re-run the test."""
sim_params = SimulationParameters(
period_start=start_dt,
period_end=end_dt
)
return sim_params, start_dt, end_dt
def get_next_trading_dt(current, interval):
next_dt = pd.Timestamp(current).tz_convert(trading.environment.exchange_tz)
while True:
# Convert timestamp to naive before adding day, otherwise the when
# stepping over EDT an hour is added.
next_dt = pd.Timestamp(next_dt.replace(tzinfo=None))
next_dt = next_dt + interval
next_dt = pd.Timestamp(next_dt, tz=trading.environment.exchange_tz)
next_dt_utc = next_dt.tz_convert('UTC')
if trading.environment.is_market_hours(next_dt_utc):
break
next_dt = next_dt_utc.tz_convert(trading.environment.exchange_tz)
return next_dt_utc
def create_trade_history(sid, prices, amounts, interval, sim_params,
source_id="test_factory"):
trades = []
current = sim_params.first_open
trading.environment.update_asset_finder(identifiers=[sid])
oneday = timedelta(days=1)
use_midnight = interval >= oneday
for price, amount in zip(prices, amounts):
if use_midnight:
trade_dt = current.replace(hour=0, minute=0)
else:
trade_dt = current
trade = create_trade(sid, price, amount, trade_dt, source_id)
trades.append(trade)
current = get_next_trading_dt(current, interval)
assert len(trades) == len(prices)
return trades
def create_dividend(sid, payment, declared_date, ex_date, pay_date):
div = Event({
'sid': sid,
'gross_amount': payment,
'net_amount': payment,
'payment_sid': None,
'ratio': None,
'declared_date': pd.tslib.normalize_date(declared_date),
'ex_date': pd.tslib.normalize_date(ex_date),
'pay_date': pd.tslib.normalize_date(pay_date),
'type': DATASOURCE_TYPE.DIVIDEND,
'source_id': 'MockDividendSource'
})
return div
def create_stock_dividend(sid, payment_sid, ratio, declared_date,
ex_date, pay_date):
return Event({
'sid': sid,
'payment_sid': payment_sid,
'ratio': ratio,
'net_amount': None,
'gross_amount': None,
'dt': pd.tslib.normalize_date(declared_date),
'ex_date': pd.tslib.normalize_date(ex_date),
'pay_date': pd.tslib.normalize_date(pay_date),
'type': DATASOURCE_TYPE.DIVIDEND,
'source_id': 'MockDividendSource'
})
def create_split(sid, ratio, date):
return Event({
'sid': sid,
'ratio': ratio,
'dt': date.replace(hour=0, minute=0, second=0, microsecond=0),
'type': DATASOURCE_TYPE.SPLIT,
'source_id': 'MockSplitSource'
})
def create_txn(sid, price, amount, datetime):
txn = Event({
'sid': sid,
'amount': amount,
'dt': datetime,
'price': price,
'type': DATASOURCE_TYPE.TRANSACTION,
'source_id': 'MockTransactionSource'
})
return txn
def create_commission(sid, value, datetime):
txn = Event({
'dt': datetime,
'type': DATASOURCE_TYPE.COMMISSION,
'cost': value,
'sid': sid,
'source_id': 'MockCommissionSource'
})
return txn
def create_txn_history(sid, priceList, amtList, interval, sim_params):
txns = []
current = sim_params.first_open
for price, amount in zip(priceList, amtList):
current = get_next_trading_dt(current, interval)
txns.append(create_txn(sid, price, amount, current))
current = current + interval
return txns
def create_returns_from_range(sim_params):
return pd.Series(index=sim_params.trading_days,
data=np.random.rand(len(sim_params.trading_days)))
def create_returns_from_list(returns, sim_params):
return pd.Series(index=sim_params.trading_days[:len(returns)],
data=returns)
def create_daily_trade_source(sids, sim_params, concurrent=False):
"""
creates trade_count trades for each sid in sids list.
first trade will be on sim_params.period_start, and daily
thereafter for each sid. Thus, two sids should result in two trades per
day.
"""
return create_trade_source(
sids,
timedelta(days=1),
sim_params,
concurrent=concurrent
)
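# Illustrative call (hypothetical sids; sim_params as built above): two sids
# yield two trades per trading day over the simulation period.
#   source = create_daily_trade_source([0, 1], sim_params)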
def create_minutely_trade_source(sids, sim_params, concurrent=False):
"""
    creates a trade for each sid in the sids list.
first trade will be on sim_params.period_start, and every minute
thereafter for each sid. Thus, two sids should result in two trades per
minute.
"""
return create_trade_source(
sids,
timedelta(minutes=1),
sim_params,
concurrent=concurrent
)
def create_trade_source(sids, trade_time_increment, sim_params,
concurrent=False):
# If the sim_params define an end that is during market hours, that will be
# used as the end of the data source
if trading.environment.is_market_hours(sim_params.period_end):
end = sim_params.period_end
# Otherwise, the last_close after the period_end is used as the end of the
# data source
else:
end = sim_params.last_close
args = tuple()
kwargs = {
'sids': sids,
'start': sim_params.first_open,
'end': end,
'delta': trade_time_increment,
'filter': sids,
'concurrent': concurrent
}
source = SpecificEquityTrades(*args, **kwargs)
return source
def create_test_df_source(sim_params=None, bars='daily'):
if bars == 'daily':
freq = pd.datetools.BDay()
elif bars == 'minute':
freq = pd.datetools.Minute()
else:
raise ValueError('%s bars not understood.' % bars)
if sim_params:
index = sim_params.trading_days
else:
if trading.environment is None:
trading.environment = trading.TradingEnvironment()
start = pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
days = trading.environment.days_in_range(start, end)
if bars == 'daily':
index = days
if bars == 'minute':
index = pd.DatetimeIndex([], freq=freq)
for day in days:
day_index = trading.environment.market_minutes_for_day(day)
index = index.append(day_index)
x = np.arange(1, len(index) + 1)
df = pd.DataFrame(x, index=index, columns=[0])
trading.environment.update_asset_finder(identifiers=[0])
return DataFrameSource(df), df
def create_test_panel_source(sim_params=None, source_type=None):
start = sim_params.first_open \
if sim_params else pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = sim_params.last_close \
if sim_params else pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
if trading.environment is None:
trading.environment = trading.TradingEnvironment()
index = trading.environment.days_in_range(start, end)
price = np.arange(0, len(index))
volume = np.ones(len(index)) * 1000
arbitrary = np.ones(len(index))
df = pd.DataFrame({'price': price,
'volume': volume,
'arbitrary': arbitrary},
index=index)
if source_type:
source_types = np.full(len(index), source_type)
df['type'] = source_types
panel = pd.Panel.from_dict({0: df})
return DataPanelSource(panel), panel
def create_test_panel_ohlc_source(sim_params=None):
start = sim_params.first_open \
if sim_params else pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)
end = sim_params.last_close \
if sim_params else pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)
if trading.environment is None:
trading.environment = trading.TradingEnvironment()
index = trading.environment.days_in_range(start, end)
price = np.arange(0, len(index)) + 100
high = price * 1.05
low = price * 0.95
open_ = price + .1 * (price % 2 - .5)
volume = np.ones(len(index)) * 1000
arbitrary = np.ones(len(index))
df = pd.DataFrame({'price': price,
'high': high,
'low': low,
'open': open_,
'volume': volume,
'arbitrary': arbitrary},
index=index)
panel = pd.Panel.from_dict({0: df})
return DataPanelSource(panel), panel
| apache-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/util/test_deprecate_kwarg.py | 2 | 2047 | import pytest
from pandas.util._decorators import deprecate_kwarg
import pandas.util.testing as tm
@deprecate_kwarg("old", "new")
def _f1(new=False):
return new
_f2_mappings = {"yes": True, "no": False}
@deprecate_kwarg("old", "new", _f2_mappings)
def _f2(new=False):
return new
def _f3_mapping(x):
return x + 1
@deprecate_kwarg("old", "new", _f3_mapping)
def _f3(new=0):
return new
@pytest.mark.parametrize("key,klass", [("old", FutureWarning), ("new", None)])
def test_deprecate_kwarg(key, klass):
x = 78
with tm.assert_produces_warning(klass):
assert _f1(**{key: x}) == x
@pytest.mark.parametrize("key", list(_f2_mappings.keys()))
def test_dict_deprecate_kwarg(key):
with tm.assert_produces_warning(FutureWarning):
assert _f2(old=key) == _f2_mappings[key]
@pytest.mark.parametrize("key", ["bogus", 12345, -1.23])
def test_missing_deprecate_kwarg(key):
with tm.assert_produces_warning(FutureWarning):
assert _f2(old=key) == key
@pytest.mark.parametrize("x", [1, -1.4, 0])
def test_callable_deprecate_kwarg(x):
with tm.assert_produces_warning(FutureWarning):
assert _f3(old=x) == _f3_mapping(x)
def test_callable_deprecate_kwarg_fail():
msg = "((can only|cannot) concatenate)|(must be str)|(Can't convert)"
with pytest.raises(TypeError, match=msg):
_f3(old="hello")
def test_bad_deprecate_kwarg():
msg = "mapping from old to new argument values must be dict or callable!"
with pytest.raises(TypeError, match=msg):
@deprecate_kwarg("old", "new", 0)
def f4(new=None):
return new
@deprecate_kwarg("old", None)
def _f4(old=True, unchanged=True):
return old, unchanged
@pytest.mark.parametrize("key", ["old", "unchanged"])
def test_deprecate_keyword(key):
x = 9
if key == "old":
klass = FutureWarning
expected = (x, True)
else:
klass = None
expected = (True, x)
with tm.assert_produces_warning(klass):
assert _f4(**{key: x}) == expected
| apache-2.0 |
jaidevd/scikit-learn | examples/cluster/plot_segmentation_toy.py | 91 | 3522 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
know as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (ie balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
genn-team/genn | userproject/SynDelay_project/plot.py | 1 | 1581 | import matplotlib.pyplot as plt
import numpy as np
import sys
assert len(sys.argv) >= 2
def load_spikes(filename):
return np.loadtxt(filename, dtype={"names": ("time", "neuron_id"),
"formats": (np.float, np.int)})
def load_voltages(filename):
return np.loadtxt(filename, dtype={"names": ("time", "v_input", "v_inter", "v_output"),
"formats": (np.float, np.float, np.float, np.float)})
# Load spikes
input_spikes = load_spikes(sys.argv[1] + "_input_st")
inter_spikes = load_spikes(sys.argv[1] + "_inter_st")
output_spikes = load_spikes(sys.argv[1] + "_output_st")
voltages = load_voltages(sys.argv[1] + "_Vm")
# Create plot
figure, axes = plt.subplots(3, sharex=True)
input_v = axes[0].twinx()
inter_v = axes[1].twinx()
output_v = axes[2].twinx()
axes[0].scatter(input_spikes["time"], input_spikes["neuron_id"], s=2)
input_v.plot(voltages["time"], voltages["v_input"], color="red")
axes[1].scatter(inter_spikes["time"], inter_spikes["neuron_id"], s=2)
inter_v.plot(voltages["time"], voltages["v_inter"], color="red")
axes[2].scatter(output_spikes["time"], output_spikes["neuron_id"], s=2)
output_v.plot(voltages["time"], voltages["v_output"], color="red")
axes[0].set_ylabel("Input neuron number")
input_v.set_ylabel("Input neuron membrane voltage")
axes[1].set_ylabel("Inter neuron number")
inter_v.set_ylabel("Inter neuron membrane voltage")
axes[2].set_ylabel("Output neuron number")
output_v.set_ylabel("Output neuron membrane voltage")
axes[2].set_xlabel("Time [ms]")
plt.show() | gpl-2.0 |
Lcarey/funding_analysis | PubMed_PhrasesOverTime/ProcessAbstractXMLs__save_pickle_dfs.py | 1 | 3159 | #%% load data
import pubmed_parser as pp
import pandas as pd
import os, glob
import matplotlib.pyplot as plt
from pyspark.sql import Row
from pyspark.sql import SparkSession
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from shutil import rmtree
import concurrent.futures
import logging
import numpy as np
working_dir = '/Users/lcarey/Downloads/Pubmed/'
pickle_output_filename = 'df.pickle'
os.chdir(working_dir)
xml_file_list = glob.glob( working_dir + '*xml.gz')
#xml_file_list = glob.glob( working_dir + '*n000*xml.gz')
print(f"{len(xml_file_list):,} xml files.\n" , xml_file_list[:4])
#xml_file_list = xml_file_list[:50]
#%% function to build df & save table to file
def build_df_and_save_file_from_meline_xml(filename):
#print(f"loading {filename}...")
output_pickle_filename = filename+'.pickle.xz'
try: # try loading the file, and make sure it has at least five rows & an abstract
df = pd.read_pickle(output_pickle_filename)
        assert len(df.loc[5,'abstract']) > 10   # require at least five rows with a real abstract
        print(f"ALREADY PROCESSED, SKIPPING\t{filename}...")
return pd.DataFrame() # return None makes Spark crash
except: # if we can't load the processed pickle file, generate it from the xml
pubmed_dict = pp.parse_medline_xml(filename) # dictionary output
print(f"loaded {filename}\tcontains {len(pubmed_dict)} entries.")
tmp_df = pd.DataFrame()
tmp_df['year'] = [d['pubdate'] for d in pubmed_dict]
tmp_df['abstract'] = [d['abstract'] for d in pubmed_dict]
tmp_df['abstract'] = tmp_df['abstract'].str.lower()
tmp_df['abstract_nchar'] = [len(t) for t in tmp_df['abstract'] ]
tmp_df = tmp_df[tmp_df.abstract_nchar > 100] # remove abstracts that are too short
tmp_df.reset_index(inplace=True, drop=True)
tmp_df.to_pickle(output_pickle_filename,compression='xz')
return tmp_df
# %% multi-threaded processing of all xml files
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
executor.map(build_df_and_save_file_from_meline_xml, xml_file_list)
# %% load all processed files into a single df
## save as a pickled DF
## using concat is faster, but uses too much RAM
# xml_file_list = glob.glob( working_dir + '*xml.gz.pickle.xz')
# print(len(xml_file_list))
# df_list = list()
# for i,filename in enumerate(xml_file_list):
# print(i,end=' ')
# df_list.append(pd.read_pickle(filename))
# df = pd.concat(df_list,ignore_index=True)
# del(df_list)
#print('\ndone')
#df.to_pickle('all_files_one_df.pickle.xz',compression='xz')
# %% load all processed files into a list
# will search on this list of dataframes
xml_file_list = glob.glob( working_dir + '*xml.gz.pickle.xz')
print(len(xml_file_list))
df_list = list()
for i,filename in enumerate(xml_file_list):
print(i,end=' ')
df_list.append(pd.read_pickle(filename))
print('\ndone')
#df.to_pickle('all_files_one_df.pickle.xz',compression='xz')
| gpl-2.0 |
CGATOxford/CGATPipelines | obsolete/reports/pipeline_capseq/trackers/macs_replicated_interval_annotations.py | 1 | 7067 | import os
import sys
import re
import types
import itertools
import matplotlib.pyplot as plt
import numpy
import numpy.ma
import Stats
import Histogram
import cpgReport
from CGATReport.Tracker import *
from CGATReport.odict import OrderedDict as odict
from macs_annotations import *
##########################################################################
class replicatedAnnotations(cpgReport.cpgTracker):
"""Base class for trackers getting info from the annotations tables.
Derived Trackers should define the two attributes :attr:`mSelect` and :attr:`mColumns`. """
pattern = "(.*)_annotations$"
mTable = "annotations"
mSelect = None
mColumns = None
mWhere = "1"
def __call__(self, track, slice=None):
where = self.mWhere
select = self.mSelect
table = self.mTable
if slice == "all" or slice is None:
data = self.getFirstRow(
"%(select)s FROM %(track)s_%(table)s WHERE %(where)s" % locals())
else:
data = self.getFirstRow(
"%(select)s FROM %(track)s_%(table)s WHERE %(where)s AND is_%slices" % locals())
return odict(list(zip(self.mColumns, data)))
##########################################################################
class replicatedAnnotationsAssociated(cpgReport.cpgTracker):
"""simple join between a data table and table defining slices.
:attr:`mTable`
table to join with
:attr:`mColums`
columns to output
"""
mPattern = "_annotations$"
mTable = None
mColumns = None
mWhere = "1"
mSelectAll = "SELECT %(columns)s FROM %(track)s_%(table)s AS t WHERE %(where)s"
mSelectSubset = "SELECT %(columns)s FROM %(track)s_%(table)s AS t, %(track)s_annotation AS a WHERE a.gene_id = t.gene_id AND a.is_%(slice)s AND %(where)s"
mSelectSlice = "SELECT %(columns)s FROM %(track)s_%(table)s AS t, %(track)s_%(slice)s AS s WHERE s.gene_id = t.gene_id AND %(where)s"
mSelectMixture = "SELECT %(columns)s FROM %(track)s_%(table)s AS t, %(subset)s AS s, %(track)s_annotation AS a WHERE a.gene_id = t.gene_id AND a.is_%(slice)s AND s.gene_id = t.gene_id AND %(where)s"
def getStatement(self, track, slice=None):
columns = self.mColumns
table = self.mTable
where = self.mWhere
if not table or not columns:
raise NotImplementedError
if slice and "." in slice:
slice, subset = slice.split(".")
return self.mSelectMixture % locals()
elif slice == "all" or slice is None:
return self.mSelectAll % locals()
else:
return self.mSelectSubset % locals()
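# Hedged doctest-style sketch (values are invented, not from the pipeline):
# the mSelect* templates are filled by plain %-formatting from the locals
# bound inside getStatement(), e.g.
# >>> tmpl = "SELECT %(columns)s FROM %(track)s_%(table)s AS t WHERE %(where)s"
# >>> tmpl % dict(columns="t.nover", track="run1", table="repeats", where="1")
# 'SELECT t.nover FROM run1_repeats AS t WHERE 1'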
##########################################################################
class replicatedRepeatOverlap(AnnotationsAssociated):
"""Overlap with repeats."""
mPattern = "_repeats$"
mColumns = "SUM(CASE WHEN nover>0 THEN 1 ELSE 0 END) as with, SUM(CASE WHEN nover=0 THEN 1 ELSE 0 END) AS without"
mTable = "repeats"
def __call__(self, track, slice=None):
statement = self.getStatement(track, slice)
if not statement:
return []
return odict(list(zip(("with", "without"), self.getFirstRow(statement))))
##########################################################################
##########################################################################
##########################################################################
class replicatedTSSOverlap(cpgReport.cpgTracker):
'''number of TSS that an interval overlaps.'''
mPattern = "_tss$"
mAnnotations = "annotations"
mTable = "tss"
mColumn = "d.is_overlap"
mWhere = "d.is_overlap < 5 "
def __call__(self, track, slice=None):
annotations = self.mAnnotations
table = self.mTable
column, where = self.mColumn, self.mWhere
if not slice or slice == "all":
data = self.getValues(
"""SELECT %(column)s FROM %(track)s_%(table)s AS d WHERE %(where)s""" % locals() )
else:
data = self.getValues( """SELECT %(column)s FROM %(track)s_%(table)s AS d, %(track)s_%(annotations)s as a
WHERE d.gene_id = a.gene_id AND a.is_%(slice)s AND %(where)s""" % locals() )
hist, bins = numpy.histogram(
data, bins=numpy.arange(0, max(data) + 1, 1))
return odict(list(zip(list(map(str, bins[:-1])), hist)))
##########################################################################
class replicatedTSSClosest(cpgReport.cpgTracker):
"""for each interval, return the distance to the closest TSS."""
mXLabel = "distance / bases"
mPattern = "_tss$"
mColumn = "d.closest_dist"
mWhere = "1"
mAnnotations = "annotations"
mTable = "tss"
def __call__(self, track, slice=None):
annotations = self.mAnnotations
table = self.mTable
column, where = self.mColumn, self.mWhere
if not slice or slice == "all":
data = self.get(
"""SELECT %(column)s FROM %(track)s_%(table)s AS d WHERE %(where)s""" % locals() )
else:
data = self.get( """SELECT %(column)s FROM %(track)s_%(table)s AS d, %(track)s_%(annotations)s as a
WHERE d.gene_id = a.gene_id AND a.is_%(slice)s AND %(where)s""" % locals() )
return data
##########################################################################
class replicatedTSSClosestUpstream(TSSClosest):
"""for each interval, return peakval and the distance to the closest upstream TSS."""
mColumn = "d.dist5"
mWhere = "d.dist5 > 0"
##########################################################################
class replicatedTSSClosestDownstream(TSSClosest):
"""for each interval, return peakval and the distance to the closest downstream TSS."""
mColumn = "d.dist3"
mWhere = "d.dist3 > 0"
##########################################################################
class replicatedTSSProfile(cpgReport.cpgTracker):
"""Get profile around TSS"""
mPattern = "_tss$"
def __call__(self, track, slice=None):
statement1 = """SELECT (closest_dist*-1) as d from %(track)s_tss where closest_dist=dist5 """
statement2 = """SELECT closest_dist as d from %(track)s_tss where closest_dist=dist3 """
data1 = self.getValues(statement1)
data2 = self.getValues(statement2)
return {"Genomic_distance": data1 + data2}
##########################################################################
class replicatedTTSProfile(cpgReport.cpgTracker):
"""Get profile around TTS"""
mPattern = "_tts$"
def __call__(self, track, slice=None):
statement1 = """SELECT (closest_dist*-1) as d from %(track)s_tts where closest_dist=dist5 """
statement2 = """SELECT closest_dist as d from %(track)s_tts where closest_dist=dist3 """
data1 = self.getValues(statement1)
data2 = self.getValues(statement2)
return {"Genomic_distance": data1 + data2}
| mit |
mattcaldwell/zipline | tests/history_cases.py | 2 | 21170 | """
Test case definitions for history tests.
"""
import pandas as pd
import numpy as np
from zipline.finance.trading import TradingEnvironment
from zipline.history.history import HistorySpec
from zipline.protocol import BarData
from zipline.utils.test_utils import to_utc
def mixed_frequency_expected_index(count, frequency):
"""
Helper for enumerating expected indices for test_mixed_frequency.
"""
env = TradingEnvironment.instance()
minute = MIXED_FREQUENCY_MINUTES[count]
if frequency == '1d':
return [env.previous_open_and_close(minute)[1], minute]
elif frequency == '1m':
return [env.previous_market_minute(minute), minute]
def mixed_frequency_expected_data(count, frequency):
"""
Helper for enumerating expected data for test_mixed_frequency.
"""
if frequency == '1d':
# First day of this test is July 3rd, which is a half day.
if count < 210:
return [np.nan, count]
else:
return [209, count]
elif frequency == '1m':
if count == 0:
return [np.nan, count]
else:
return [count - 1, count]
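# Hedged doctest-style sketch of the helper above (values follow directly
# from its branches; nan is numpy.nan):
# >>> mixed_frequency_expected_data(0, '1m')
# [nan, 0]
# >>> mixed_frequency_expected_data(5, '1m')
# [4, 5]
# >>> mixed_frequency_expected_data(100, '1d')  # before the 210th bar
# [nan, 100]
# >>> mixed_frequency_expected_data(300, '1d')
# [209, 300]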
MIXED_FREQUENCY_MINUTES = TradingEnvironment.instance().market_minute_window(
to_utc('2013-07-03 9:31AM'), 600,
)
ONE_MINUTE_PRICE_ONLY_SPECS = [
HistorySpec(1, '1m', 'price', True, data_frequency='minute'),
]
DAILY_OPEN_CLOSE_SPECS = [
HistorySpec(3, '1d', 'open_price', False, data_frequency='minute'),
HistorySpec(3, '1d', 'close_price', False, data_frequency='minute'),
]
ILLIQUID_PRICES_SPECS = [
HistorySpec(3, '1m', 'price', False, data_frequency='minute'),
HistorySpec(5, '1m', 'price', True, data_frequency='minute'),
]
MIXED_FREQUENCY_SPECS = [
HistorySpec(1, '1m', 'price', False, data_frequency='minute'),
HistorySpec(2, '1m', 'price', False, data_frequency='minute'),
HistorySpec(2, '1d', 'price', False, data_frequency='minute'),
]
MIXED_FIELDS_SPECS = [
HistorySpec(3, '1m', 'price', True, data_frequency='minute'),
HistorySpec(3, '1m', 'open_price', True, data_frequency='minute'),
HistorySpec(3, '1m', 'close_price', True, data_frequency='minute'),
HistorySpec(3, '1m', 'high', True, data_frequency='minute'),
HistorySpec(3, '1m', 'low', True, data_frequency='minute'),
HistorySpec(3, '1m', 'volume', True, data_frequency='minute'),
]
HISTORY_CONTAINER_TEST_CASES = {
# June 2013
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
'test one minute price only': {
# A list of HistorySpec objects.
'specs': ONE_MINUTE_PRICE_ONLY_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
'dt': to_utc('2013-06-21 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'price': 5,
'dt': to_utc('2013-06-21 9:31AM'),
},
},
),
BarData(
{
1: {
'price': 6,
'dt': to_utc('2013-06-21 9:32AM'),
},
},
),
],
# Expected results
'expected': {
ONE_MINUTE_PRICE_ONLY_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [5],
},
index=[
to_utc('2013-06-21 9:31AM'),
],
),
pd.DataFrame(
data={
1: [6],
},
index=[
to_utc('2013-06-21 9:32AM'),
],
),
],
},
},
'test daily open close': {
# A list of HistorySpec objects.
'specs': DAILY_OPEN_CLOSE_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
'dt': to_utc('2013-06-21 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'open_price': 10,
'close_price': 11,
'dt': to_utc('2013-06-21 10:00AM'),
},
},
),
BarData(
{
1: {
'open_price': 12,
'close_price': 13,
'dt': to_utc('2013-06-21 3:30PM'),
},
},
),
BarData(
{
1: {
'open_price': 14,
'close_price': 15,
# Wait a full market day before the next bar.
# We should end up with nans for Monday the 24th.
'dt': to_utc('2013-06-25 9:31AM'),
},
},
),
],
# Dictionary mapping spec_key -> list of expected outputs
'expected': {
# open
DAILY_OPEN_CLOSE_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, 10]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 10:00AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, 10]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 3:30PM'),
],
),
pd.DataFrame(
data={
1: [10, np.nan, 14]
},
index=[
to_utc('2013-06-21 4:00PM'),
to_utc('2013-06-24 4:00PM'),
to_utc('2013-06-25 9:31AM'),
],
),
],
# close
DAILY_OPEN_CLOSE_SPECS[1].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, 11]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 10:00AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, 13]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 3:30PM'),
],
),
pd.DataFrame(
data={
1: [13, np.nan, 15]
},
index=[
to_utc('2013-06-21 4:00PM'),
to_utc('2013-06-24 4:00PM'),
to_utc('2013-06-25 9:31AM'),
],
),
],
},
},
'test illiquid prices': {
# A list of HistorySpec objects.
'specs': ILLIQUID_PRICES_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
'dt': to_utc('2013-06-28 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'price': 10,
'dt': to_utc('2013-06-28 9:31AM'),
},
},
),
BarData(
{
1: {
'price': 11,
'dt': to_utc('2013-06-28 9:32AM'),
},
},
),
BarData(
{
1: {
'price': 12,
'dt': to_utc('2013-06-28 9:33AM'),
},
},
),
BarData(
{
1: {
'price': 13,
# Note: Skipping 9:34 to simulate illiquid bar/missing
# data.
'dt': to_utc('2013-06-28 9:35AM'),
},
},
),
],
# Dictionary mapping spec_key -> list of expected outputs
'expected': {
ILLIQUID_PRICES_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, 10],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, 10, 11],
},
index=[
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [10, 11, 12],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
# Since there's no update for 9:34, this is called at 9:35.
pd.DataFrame(
data={
1: [12, np.nan, 13],
},
index=[
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
),
],
ILLIQUID_PRICES_SPECS[1].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, np.nan, np.nan, 10],
},
index=[
to_utc('2013-06-27 3:57PM'),
to_utc('2013-06-27 3:58PM'),
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, np.nan, 10, 11],
},
index=[
to_utc('2013-06-27 3:58PM'),
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, 10, 11, 12],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
# Since there's no update for 9:34, this is called at 9:35.
# The 12 value from 9:33 should be forward-filled.
pd.DataFrame(
data={
1: [10, 11, 12, 12, 13],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
),
],
},
},
'test mixed frequencies': {
# A list of HistorySpec objects.
'specs': MIXED_FREQUENCY_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
# July 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
'dt': to_utc('2013-07-03 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'price': count,
'dt': dt,
}
}
)
for count, dt in enumerate(MIXED_FREQUENCY_MINUTES)
],
# Dictionary mapping spec_key -> list of expected outputs.
'expected': {
MIXED_FREQUENCY_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [count],
},
index=[minute],
)
for count, minute in enumerate(MIXED_FREQUENCY_MINUTES)
],
MIXED_FREQUENCY_SPECS[1].key_str: [
pd.DataFrame(
data={
1: mixed_frequency_expected_data(count, '1m'),
},
index=mixed_frequency_expected_index(count, '1m'),
)
for count in range(len(MIXED_FREQUENCY_MINUTES))
],
MIXED_FREQUENCY_SPECS[2].key_str: [
pd.DataFrame(
data={
1: mixed_frequency_expected_data(count, '1d'),
},
index=mixed_frequency_expected_index(count, '1d'),
)
for count in range(len(MIXED_FREQUENCY_MINUTES))
]
},
},
'test multiple fields and sids': {
# A list of HistorySpec objects.
'specs': MIXED_FIELDS_SPECS,
# Sids for the test.
'sids': [1, 10],
# Start date for test.
'dt': to_utc('2013-06-28 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'dt': dt,
'price': count,
'open_price': count,
'close_price': count,
'high': count,
'low': count,
'volume': count,
},
10: {
'dt': dt,
'price': count * 10,
'open_price': count * 10,
'close_price': count * 10,
'high': count * 10,
'low': count * 10,
'volume': count * 10,
},
},
)
for count, dt in enumerate([
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
# NOTE: No update for 9:34
to_utc('2013-06-28 9:35AM'),
])
],
# Dictionary mapping spec_key -> list of expected outputs
'expected': dict(
# Build a dict from a list of tuples. Doing it this way because
# there are two distinct cases we want to test: forward-fillable
# fields and non-forward-fillable fields.
[
(
# Non forward-fill fields
key,
[
pd.DataFrame(
data={
1: [np.nan, np.nan, 0],
10: [np.nan, np.nan, 0],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, 0, 1],
10: [np.nan, 0, 10],
},
index=[
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [0, 1, 2],
10: [0, 10, 20],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
pd.DataFrame(
data={
1: [2, np.nan, 3],
10: [20, np.nan, 30],
},
index=[
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
# For volume, when we are missing data, we replace
# it with 0s to show that no trades occurred.
).fillna(0 if 'volume' in key else np.nan),
],
)
for key in [spec.key_str for spec in MIXED_FIELDS_SPECS
if spec.field not in HistorySpec.FORWARD_FILLABLE]
]
+ # Concatenate the expected results for non-ffillable with
# expected result for ffillable.
[
(
# Forward-fillable fields
key,
[
pd.DataFrame(
data={
1: [np.nan, np.nan, 0],
10: [np.nan, np.nan, 0],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, 0, 1],
10: [np.nan, 0, 10],
},
index=[
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [0, 1, 2],
10: [0, 10, 20],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
pd.DataFrame(
data={
1: [2, 2, 3],
10: [20, 20, 30],
},
index=[
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
),
],
)
for key in [spec.key_str for spec in MIXED_FIELDS_SPECS
if spec.field in HistorySpec.FORWARD_FILLABLE]
]
),
},
}
| apache-2.0 |
hupili/bearcart | examples/stocks.py | 4 | 1375 | # -*- coding: utf-8 -*-
'''
An example for Bearcart
'''
import datetime
import bearcart
import pandas as pd
import pandas.io.data as web
html_path = r'index.html'
data_path = r'data.json'
js_path = 'rickshaw.min.js'
css_path = 'rickshaw.min.css'
#All of the following import code comes from Wes McKinney's book, Python
#for Data Analysis
all_data = {}
date_start = datetime.datetime(2010, 1, 1)
date_end = datetime.datetime(2014, 1, 1)
for ticker in ['AAPL', 'IBM', 'YHOO', 'MSFT']:
all_data[ticker] = web.DataReader(ticker, 'yahoo', date_start, date_end)
price = pd.DataFrame({tic: data['Adj Close']
for tic, data in all_data.items()})
#Two data, line chart
df = pd.concat([price['YHOO'], price['MSFT']], axis=1)
vis = bearcart.Chart(df)
vis.create_chart(html_path=html_path, data_path=data_path,
js_path=js_path, css_path=css_path)
#Bunch of data, area chart
vis = bearcart.Chart(price, plt_type='area')
vis.create_chart(html_path=html_path, data_path=data_path,
js_path=js_path, css_path=css_path)
#Two data, custom colors, scatterplot
vis = bearcart.Chart(df, plt_type='scatterplot', colors={'YHOO': '#1d4e69',
'MSFT': '#3b98ca'})
vis.create_chart(html_path=html_path, data_path=data_path,
js_path=js_path, css_path=css_path)
| mit |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/pandas/tools/plotting.py | 1 | 117556 | # being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
from pandas.core.common import AbstractMethodError
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series, remove_na
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex, Period
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
from pandas.compat import range, lrange, lmap, map, zip, string_types
import pandas.compat as compat
from pandas.util.decorators import Appender
try: # mpl optional
import pandas.tseries.converter as conv
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'legend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
color=None):
import matplotlib.pyplot as plt
if color is None and colormap is not None:
if isinstance(colormap, compat.string_types):
import matplotlib.cm as cm
cmap = colormap
colormap = cm.get_cmap(colormap)
if colormap is None:
raise ValueError("Colormap {0} is not recognized".format(cmap))
colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
elif color is not None:
if colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
colors = color
else:
if color_type == 'default':
# need to call list() on the result to copy so we don't
# modify the global rcParams below
colors = list(plt.rcParams.get('axes.color_cycle',
list('bgrcmyk')))
if isinstance(colors, compat.string_types):
colors = list(colors)
elif color_type == 'random':
import random
def random_color(column):
random.seed(column)
return [random.random() for _ in range(3)]
colors = lmap(random_color, lrange(num_colors))
else:
raise ValueError("color_type must be either 'default' or 'random'")
if len(colors) != num_colors:
multiple = num_colors//len(colors) - 1
mod = num_colors % len(colors)
colors += multiple * colors
colors += colors[:mod]
return colors
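# Hedged doctest-style sketch (internal helper, not public API): the chosen
# palette is repeated and truncated until it has exactly num_colors entries,
# e.g.
# >>> _get_standard_colors(num_colors=3, color=['r', 'g'])
# ['r', 'g', 'r']
# With neither `color` nor `colormap` given, it falls back to the
# 'axes.color_cycle' rcParam (defaulting to list('bgrcmyk')).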
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
format that makes it easy to break down into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self):
self['xaxis.compat'] = False
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
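# Hedged usage sketch: plot_params is a small option store with aliasing
# ('x_compat' maps to 'xaxis.compat'); `use` overrides a value temporarily.
# >>> plot_params['x_compat']
# False
# >>> with plot_params.use('x_compat', True):
# ...     pass  # plots drawn here bypass the dynamic time-series x-axis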
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
from matplotlib.artist import setp
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = com.notnull(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
if j != 0:
ax.yaxis.set_visible(False)
if i != n-1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
def _gca():
import matplotlib.pyplot as plt
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
Parameters:
-----------
frame: DataFrame
class_column: str
Column name containing class names
ax: Matplotlib axis object, optional
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib scatter plotting method
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=com.pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
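# Hedged usage sketch (same iris URL assumption as the parallel_coordinates
# docstring example below; not part of the radviz docstring itself):
# >>> from pandas import read_csv
# >>> from pandas.tools.plotting import radviz
# >>> from matplotlib import pyplot as plt
# >>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
# >>> radviz(df, 'Name')
# >>> plt.show()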
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Parameters:
-----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
Returns:
--------
ax: Matplotlib axis object
"""
from math import sqrt, pi, sin, cos
import matplotlib.pyplot as plt
def function(amplitudes):
def f(x):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
harmonic = 1.0
for x_even, x_odd in zip(amplitudes[1::2], amplitudes[2::2]):
result += (x_even * sin(harmonic * x) +
x_odd * cos(harmonic * x))
harmonic += 1.0
if len(amplitudes) % 2 != 0:
result += amplitudes[-1] * sin(harmonic * x)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
x = [-pi + 2.0 * pi * (t / float(samples)) for t in range(samples)]
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = [f(t) for t in x]
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
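# Hedged usage sketch (again assuming the iris dataset with a 'Name' class
# column, as in the parallel_coordinates docstring example below):
# >>> from pandas import read_csv
# >>> from pandas.tools.plotting import andrews_curves
# >>> from matplotlib import pyplot as plt
# >>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
# >>> andrews_curves(df, 'Name')
# >>> plt.show()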
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
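# Hedged usage sketch (random data, purely illustrative):
# >>> import numpy as np
# >>> from pandas import Series
# >>> from pandas.tools.plotting import bootstrap_plot
# >>> s = Series(np.random.randn(500))
# >>> fig = bootstrap_plot(s, size=50, samples=500)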
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, **kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame: DataFrame
class_column: str
Column name containing class names
cols: list, optional
A list of column names to use
ax: matplotlib.axis, optional
matplotlib axis object
color: list or tuple, optional
Colors to use for the different classes
use_columns: bool, optional
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
colormap: str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: matplotlib axis object
Examples
--------
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
>>> df = read_csv('https://raw.github.com/pydata/pandas/master/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set([])
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = com.pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, linewidth=1, color='black')
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters:
-----------
series: Time series
lag: lag of the scatter plot, default 1
ax: Matplotlib axis object, optional
kwds: Matplotlib scatter method keyword arguments, optional
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + %s)" % lag)
ax.scatter(y1, y2, **kwds)
return ax
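# Hedged usage sketch (synthetic autocorrelated series, purely illustrative):
# >>> import numpy as np
# >>> from pandas import Series
# >>> from pandas.tools.plotting import lag_plot
# >>> s = Series(np.sin(np.linspace(0, 10, 200)) + 0.1 * np.random.randn(200))
# >>> ax = lag_plot(s, lag=1)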
def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns:
-----------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
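# Hedged usage sketch (same kind of synthetic series as the lag_plot sketch):
# >>> import numpy as np
# >>> from pandas import Series
# >>> from pandas.tools.plotting import autocorrelation_plot
# >>> s = Series(np.sin(np.linspace(0, 10, 200)))
# >>> ax = autocorrelation_plot(s)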
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data :
"""
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the user should do the visibility setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
if isinstance(self._default_rot, dict):
self.rot = self._default_rot[self.kind]
else:
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else True
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color'"
"instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if com.is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError("Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
if self.sort_columns:
columns = com._try_sort(data.columns)
else:
columns = data.columns
for col in columns:
if keep_index is True:
yield col, data[col]
else:
yield col, data[col].values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._post_plot_logic()
self._adorn_subplots()
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, 'right_ax'):
# if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, 'left_ax'):
# if it has a left_ax property, ``ax`` must be the right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
new_ax._get_lines.color_cycle = orig_ax._get_lines.color_cycle
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
if self.logx or self.loglog:
[a.set_xscale('log') for a in axes]
if self.logy or self.loglog:
[a.set_yscale('log') for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not com.is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (com.is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, Series):
label = self.kwds.pop('label', None)
if label is None and data.name is None:
label = 'None'
data = data.to_frame(name=label)
numeric_data = data.convert_objects()._get_numeric_data()
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic(self):
pass
def _adorn_subplots(self):
to_adorn = self.axes
if len(self.axes) > 0:
all_axes = self._get_axes()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in to_adorn:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
self.fig.suptitle(self.title)
else:
self.axes[0].set_title(self.title)
labels = [com.pprint_thing(key) for key in self.data.index]
labels = dict(zip(range(len(self.data.index)), labels))
for ax in self.axes:
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [labels.get(x, '') for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [labels.get(y, '') for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, MultiIndex):
name = self.data.columns.name
if name is not None:
name = com.pprint_thing(name)
return name
else:
stringified = map(com.pprint_thing,
self.data.columns.names)
return ','.join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + ' (right)'
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ''
if not self.subplots:
if leg is not None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == 'reverse':
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc='best', title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc='best')
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = (getattr(ax, 'left_ax', None) or
getattr(ax, 'right_ax', None))
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
self.data = self.data.reindex(index=index.order())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
return x
def _is_datetype(self):
index = self.data.index
return (isinstance(index, (PeriodIndex, DatetimeIndex)) or
index.inferred_type in ('datetime', 'date', 'datetime64',
'time'))
def _get_plot_function(self):
'''
Returns the matplotlib plotting function (plot or errorbar) based on
the presence of errorbar keywords.
'''
errorbar = any(e is not None for e in self.errors.values())
def plotf(ax, x, y, style=None, **kwds):
mask = com.isnull(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if errorbar:
return self.plt.Axes.errorbar(ax, x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is unsupported
if style is not None:
args = (ax, x, y, style)
else:
args = (ax, x, y)
return self.plt.Axes.plot(*args, **kwds)
return plotf
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
if any(x is not None for x in name):
name = ','.join([com.pprint_thing(x) for x in name])
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = com.pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, 'left_ax', ax)
else:
return getattr(ax, 'right_ax', ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
return self.data.columns[i] in self.secondary_y
def _get_style(self, i, col_name):
style = ''
if self.subplots:
style = 'k'
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[i]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(col_name, style)
else:
style = self.style
return style or None
def _get_colors(self, num_colors=None, color_kwds='color'):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds))
def _maybe_add_color(self, colors, kwds, style, i):
has_color = 'color' in kwds or self.colormap is not None
if has_color and (style is None or re.match('[a-z]+', style) is None):
kwds['color'] = colors[i % len(colors)]
def _parse_errorbars(self, label, err):
'''
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
'''
if err is None:
return None
from pandas import DataFrame, Series
def match_labels(data, e):
e = e.reindex_axis(data.index)
return e
# key-matched DataFrame
if isinstance(err, DataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, Series):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, string_types):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif com.is_list_like(err):
if com.is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif com.is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid %s detected" % label
raise ValueError(msg)
return err
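# Hedged summary of the shapes accepted above, for a frame with N plotted
# series each of length M (DataFrame/dict keys must match plotted columns):
#   scalar                         -> tiled to an (N, M) array
#   Series / 1d array of length M  -> broadcast to (N, M)
#   column name (str)              -> that column is popped from the data and tiled to (N, M)
#   (N, 2, M) array                -> asymmetric lower/upper errors per series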
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
from pandas import DataFrame
errors = {}
for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (DataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_axes(self):
return self.axes[0].get_figure().get_axes()
def _get_axes_layout(self):
axes = self._get_axes()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class ScatterPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, c=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError('scatter requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.x = x
self.y = y
self.c = c
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib as mpl
mpl_ge_1_3_1 = str(mpl.__version__) >= LooseVersion('1.3.1')
import matplotlib.pyplot as plt
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = com.is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
cmap = plt.cm.get_cmap(cmap)
if c is None:
c_values = self.plt.rcParams['patch.facecolor']
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, 'label'):
label = self.label
else:
label = None
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
if mpl_ge_1_3_1:
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds)
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class HexBinPlot(MPLPlot):
_layout_type = 'single'
def __init__(self, data, x, y, C=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError('hexbin requires an x and y column')
if com.is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if com.is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if com.is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.x = x
self.y = y
self.C = C
@property
def nseries(self):
return 1
def _make_plot(self):
import matplotlib.pyplot as plt
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
**self.kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
def _make_legend(self):
pass
def _post_plot_logic(self):
ax = self.axes[0]
x, y = self.x, self.y
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
class LinePlot(MPLPlot):
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _index_freq(self):
freq = getattr(self.data.index, 'freq', None)
if freq is None:
freq = getattr(self.data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(self.data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _is_dynamic_freq(self, freq):
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq is not None and self._no_base(freq)
def _no_base(self, freq):
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(self.data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = self.data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _use_dynamic_x(self):
freq = self._index_freq()
ax = self._get_ax(0)
ax_freq = getattr(ax, 'freq', None)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
return (freq is not None) and self._is_dynamic_freq(freq)
def _is_ts_plot(self):
# this is slightly deceptive
return not self.x_compat and self.use_index and self._use_dynamic_x()
def _make_plot(self):
self._initialize_prior(len(self.data))
if self._is_ts_plot():
data = self._maybe_convert_index(self.data)
x = data.index # dummy, not used
plotf = self._get_ts_plot_function()
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._get_plot_function()
it = self._iter_data()
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
style = self._get_style(i, label)
kwds = self.kwds.copy()
self._maybe_add_color(colors, kwds, style, i)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label) # .encode('utf-8')
kwds['label'] = label
newlines = plotf(ax, x, y, style=style, column_num=i, **kwds)
self._add_legend_handle(newlines[0], label, index=i)
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
def _get_stacked_values(self, y, label):
if self.stacked:
if (y >= 0).all():
return self._pos_prior + y
elif (y <= 0).all():
return self._neg_prior + y
else:
                raise ValueError('When stacked is True, each column must be either all positive or negative. '
                                 '{0} contains both positive and negative values'.format(label))
else:
return y
def _get_plot_function(self):
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
            # column_num is used to get the target column from plotf in line and area plots
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
self._update_prior(y)
return lines
return plotf
def _get_ts_plot_function(self):
from pandas.tseries.plotting import tsplot
plotf = self._get_plot_function()
def _plot(ax, x, data, style=None, **kwds):
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
lines = tsplot(data, plotf, ax=ax, style=style, **kwds)
return lines
return _plot
def _initialize_prior(self, n):
self._pos_prior = np.zeros(n)
self._neg_prior = np.zeros(n)
def _update_prior(self, y):
if self.stacked and not self.subplots:
            # tsplot resample may change data length
if len(self._pos_prior) != len(y):
self._initialize_prior(len(y))
if (y >= 0).all():
self._pos_prior += y
elif (y <= 0).all():
self._neg_prior += y
def _maybe_convert_index(self, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
ax = self._get_ax(0)
freq = getattr(ax, 'freq', None)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data.index = data.index.to_period(freq=freq)
return data
def _post_plot_logic(self):
df = self.data
condition = (not self._use_dynamic_x()
and df.index.is_all_dates
and not self.subplots
or (self.subplots and self.sharex))
index_name = self._get_index_name()
for ax in self.axes:
if condition:
# irregular TS rotated 30 deg. by default
# probably a better place to check / set this.
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
def __init__(self, data, **kwargs):
kwargs.setdefault('stacked', True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault('alpha', 0.5)
def _get_plot_function(self):
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
else:
f = MPLPlot._get_plot_function(self)
def plotf(ax, x, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.data))
y_values = self._get_stacked_values(y, kwds['label'])
lines = f(ax, x, y_values, style=style, **kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
if (y >= 0).all():
start = self._pos_prior
elif (y <= 0).all():
start = self._neg_prior
else:
start = np.zeros(len(y))
if not 'color' in kwds:
kwds['color'] = lines[0].get_color()
self.plt.Axes.fill_between(ax, xdata, start, y_values, **kwds)
self._update_prior(y)
return lines
return plotf
def _add_legend_handle(self, handle, label, index=None):
from matplotlib.patches import Rectangle
# Because fill_between isn't supported in legend,
# specifically add Rectangle handle here
alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(), alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
def _post_plot_logic(self):
LinePlot._post_plot_logic(self)
if self.ylim is None:
if (self.data >= 0).all().all():
for ax in self.axes:
ax.set_ylim(0, None)
elif (self.data <= 0).all().all():
for ax in self.axes:
ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
_default_rot = {'bar': 90, 'barh': 0}
def __init__(self, data, **kwargs):
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop('bottom', 0)
self.left = kwargs.pop('left', 0)
self.log = kwargs.pop('log',False)
MPLPlot.__init__(self, data, **kwargs)
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs['align'] == 'edge':
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs['align'] == 'edge':
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
def _args_adjust(self):
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if com.is_list_like(self.left):
self.left = np.array(self.left)
def _get_plot_function(self):
if self.kind == 'bar':
def f(ax, x, y, w, start=None, **kwds):
start = start + self.bottom
return ax.bar(x, y, w, bottom=start, log=self.log, **kwds)
elif self.kind == 'barh':
def f(ax, x, y, w, start=None, log=self.log, **kwds):
start = start + self.left
return ax.barh(x, y, w, left=start, log=self.log, **kwds)
else:
raise ValueError("BarPlot kind must be either 'bar' or 'barh'")
return f
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
bar_f = self._get_plot_function()
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
kwds['color'] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = com.pprint_thing(label)
if (('yerr' in kwds) or ('xerr' in kwds)) \
and (kwds.get('ecolor') is None):
kwds['ecolor'] = mpl.rcParams['xtick.color']
start = 0
if self.log and (y >= 1).all():
start = 1
if self.subplots:
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior)
w = self.bar_width / 2
rect = bar_f(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label, **kwds)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = bar_f(ax, self.ax_pos + (i + 0.5) * w, y, w,
start=start, label=label, **kwds)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self):
for ax in self.axes:
if self.use_index:
str_index = [com.pprint_thing(key) for key in self.data.index]
else:
str_index = [com.pprint_thing(key) for key in
range(self.data.shape[0])]
name = self._get_index_name()
s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
if self.kind == 'bar':
ax.set_xlim((s_edge, e_edge))
ax.set_xticks(self.tick_pos)
ax.set_xticklabels(str_index)
if name is not None and self.use_index:
ax.set_xlabel(name)
elif self.kind == 'barh':
# horizontal bars
ax.set_ylim((s_edge, e_edge))
ax.set_yticks(self.tick_pos)
ax.set_yticklabels(str_index)
if name is not None and self.use_index:
ax.set_ylabel(name)
else:
raise NotImplementedError(self.kind)
@property
def orientation(self):
if self.kind == 'bar':
return 'vertical'
elif self.kind == 'barh':
return 'horizontal'
else:
raise NotImplementedError(self.kind)
class HistPlot(LinePlot):
def __init__(self, data, bins=10, bottom=0, **kwargs):
self.bins = bins # use mpl default
self.bottom = bottom
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if com.is_integer(self.bins):
# create common bin edge
values = self.data.convert_objects()._get_numeric_data()
values = np.ravel(values)
values = values[~com.isnull(values)]
hist, self.bins = np.histogram(values, bins=self.bins,
range=self.kwds.get('range', None),
weights=self.kwds.get('weights', None))
if com.is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
def _get_plot_function(self):
def plotf(ax, y, style=None, column_num=None, **kwds):
if column_num == 0:
self._initialize_prior(len(self.bins) - 1)
y = y[~com.isnull(y)]
bottom = self._pos_prior + self.bottom
# ignore style
n, bins, patches = self.plt.Axes.hist(ax, y, bins=self.bins,
bottom=bottom, **kwds)
self._update_prior(n)
return patches
return plotf
def _make_plot(self):
plotf = self._get_plot_function()
colors = self._get_colors()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
style = self._get_style(i, label)
label = com.pprint_thing(label)
kwds = self.kwds.copy()
kwds['label'] = label
self._maybe_add_color(colors, kwds, style, i)
if style is not None:
kwds['style'] = style
artists = plotf(ax, y, column_num=i, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _post_plot_logic(self):
if self.orientation == 'horizontal':
for ax in self.axes:
ax.set_xlabel('Degree')
else:
for ax in self.axes:
ax.set_ylabel('Degree')
@property
def orientation(self):
if self.kwds.get('orientation', None) == 'horizontal':
return 'horizontal'
else:
return 'vertical'
class KdePlot(HistPlot):
orientation = 'vertical'
def __init__(self, data, bw_method=None, ind=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
self.bw_method = bw_method
self.ind = ind
def _args_adjust(self):
pass
def _get_ind(self, y):
if self.ind is None:
sample_range = max(y) - min(y)
ind = np.linspace(min(y) - 0.5 * sample_range,
max(y) + 0.5 * sample_range, 1000)
else:
ind = self.ind
return ind
def _get_plot_function(self):
from scipy.stats import gaussian_kde
from scipy import __version__ as spv
f = MPLPlot._get_plot_function(self)
def plotf(ax, y, style=None, column_num=None, **kwds):
y = remove_na(y)
if LooseVersion(spv) >= '0.11.0':
gkde = gaussian_kde(y, bw_method=self.bw_method)
else:
gkde = gaussian_kde(y)
if self.bw_method is not None:
msg = ('bw_method was added in Scipy 0.11.0.' +
' Scipy version in use is %s.' % spv)
warnings.warn(msg)
ind = self._get_ind(y)
y = gkde.evaluate(ind)
lines = f(ax, ind, y, style=style, **kwds)
return lines
return plotf
def _post_plot_logic(self):
for ax in self.axes:
ax.set_ylabel('Density')
class PiePlot(MPLPlot):
_layout_type = 'horizontal'
def __init__(self, data, kind=None, **kwargs):
data = data.fillna(value=0)
if (data < 0).any().any():
raise ValueError("{0} doesn't allow negative values".format(kind))
MPLPlot.__init__(self, data, kind=kind, **kwargs)
def _args_adjust(self):
self.grid = False
self.logy = False
self.logx = False
self.loglog = False
def _validate_color_args(self):
pass
def _make_plot(self):
self.kwds.setdefault('colors', self._get_colors(num_colors=len(self.data),
color_kwds='colors'))
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
if label is not None:
label = com.pprint_thing(label)
ax.set_ylabel(label)
kwds = self.kwds.copy()
def blank_labeler(label, value):
if value == 0:
return ''
else:
return label
idx = [com.pprint_thing(v) for v in self.data.index]
labels = kwds.pop('labels', idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
# with nonzero wedges
if labels is not None:
blabels = [blank_labeler(label, value) for
label, value in zip(labels, y)]
else:
blabels = None
results = ax.pie(y, labels=blabels, **kwds)
if kwds.get('autopct', None) is not None:
patches, texts, autotexts = results
else:
patches, texts = results
autotexts = []
if self.fontsize is not None:
for t in texts + autotexts:
t.set_fontsize(self.fontsize)
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
for p, l in zip(patches, leglabels):
self._add_legend_handle(p, l)
class BoxPlot(LinePlot):
_layout_type = 'horizontal'
_valid_return_types = (None, 'axes', 'dict', 'both')
# namedtuple to hold results
BP = namedtuple("Boxplot", ['ax', 'lines'])
def __init__(self, data, return_type=None, **kwargs):
# Do not call LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
# Disable label ax sharing. Otherwise, all subplots shows last column label
if self.orientation == 'vertical':
self.sharex = False
else:
self.sharey = False
def _get_plot_function(self):
def plotf(ax, y, column_num=None, **kwds):
if y.ndim == 2:
y = [remove_na(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if any cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na(y)
bp = ax.boxplot(y, **kwds)
if self.return_type == 'dict':
return bp, bp
elif self.return_type == 'both':
return self.BP(ax=ax, lines=bp), bp
else:
return ax, bp
return plotf
def _validate_color_args(self):
if 'color' in self.kwds:
if self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
self.color = self.kwds.pop('color')
if isinstance(self.color, dict):
valid_keys = ['boxes', 'whiskers', 'medians', 'caps']
for key, values in compat.iteritems(self.color):
if key not in valid_keys:
raise ValueError("color dict contains invalid key '{0}' "
"The key must be either {1}".format(key, valid_keys))
else:
self.color = None
# get standard colors for default
colors = _get_standard_colors(num_colors=3,
colormap=self.colormap,
color=None)
# use 2 colors by default, for box/whisker and median
# flier colors isn't needed here
# because it can be specified by ``sym`` kw
self._boxes_c = colors[0]
self._whiskers_c = colors[0]
self._medians_c = colors[2]
self._caps_c = 'k' # mpl default
def _get_colors(self, num_colors=None, color_kwds='color'):
pass
def maybe_color_bp(self, bp):
if isinstance(self.color, dict):
boxes = self.color.get('boxes', self._boxes_c)
whiskers = self.color.get('whiskers', self._whiskers_c)
medians = self.color.get('medians', self._medians_c)
caps = self.color.get('caps', self._caps_c)
else:
# Other types are forwarded to matplotlib
# If None, use default colors
boxes = self.color or self._boxes_c
whiskers = self.color or self._whiskers_c
medians = self.color or self._medians_c
caps = self.color or self._caps_c
from matplotlib.artist import setp
setp(bp['boxes'], color=boxes, alpha=1)
setp(bp['whiskers'], color=whiskers, alpha=1)
setp(bp['medians'], color=medians, alpha=1)
setp(bp['caps'], color=caps, alpha=1)
def _make_plot(self):
plotf = self._get_plot_function()
if self.subplots:
self._return_obj = compat.OrderedDict()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
ret, bp = plotf(ax, y, column_num=i, **kwds)
self.maybe_color_bp(bp)
self._return_obj[label] = ret
label = [com.pprint_thing(label)]
self._set_ticklabels(ax, label)
else:
y = self.data.values.T
ax = self._get_ax(0)
kwds = self.kwds.copy()
ret, bp = plotf(ax, y, column_num=0, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, y in self._iter_data()]
labels = [com.pprint_thing(l) for l in labels]
if not self.use_index:
labels = [com.pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
def _set_ticklabels(self, ax, labels):
if self.orientation == 'vertical':
ax.set_xticklabels(labels)
else:
ax.set_yticklabels(labels)
def _make_legend(self):
pass
def _post_plot_logic(self):
pass
@property
def orientation(self):
if self.kwds.get('vert', True):
return 'vertical'
else:
return 'horizontal'
@property
def result(self):
if self.return_type is None:
return super(BoxPlot, self).result
else:
return self._return_obj
# kinds supported by both dataframe and series
_common_kinds = ['line', 'bar', 'barh', 'kde', 'density', 'area', 'hist', 'box']
# kinds supported by dataframe
_dataframe_kinds = ['scatter', 'hexbin']
# kinds supported only by series or dataframe single column
_series_kinds = ['pie']
_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds
_plot_klass = {'line': LinePlot, 'bar': BarPlot, 'barh': BarPlot,
'kde': KdePlot, 'hist': HistPlot, 'box': BoxPlot,
'scatter': ScatterPlot, 'hexbin': HexBinPlot,
'area': AreaPlot, 'pie': PiePlot}
def _plot(data, x=None, y=None, subplots=False,
ax=None, kind='line', **kwds):
kind = _get_standard_kind(kind.lower().strip())
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
raise ValueError("%r is not a valid plot kind" % kind)
from pandas import DataFrame
if kind in _dataframe_kinds:
if isinstance(data, DataFrame):
plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax,
kind=kind, **kwds)
else:
raise ValueError("plot kind %r can only be used for data frames"
% kind)
elif kind in _series_kinds:
if isinstance(data, DataFrame):
if y is None and subplots is False:
msg = "{0} requires either y column or 'subplots=True'"
raise ValueError(msg.format(kind))
elif y is not None:
if com.is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
# converted to series actually. copy to not modify
data = data[y].copy()
data.index.name = y
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
if isinstance(data, DataFrame):
if x is not None:
if com.is_integer(x) and not data.columns.holds_integer():
x = data.columns[x]
data = data.set_index(x)
if y is not None:
if com.is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
label = kwds['label'] if 'label' in kwds else y
series = data[y].copy() # Don't modify
series.name = label
for kw in ['xerr', 'yerr']:
if (kw in kwds) and \
(isinstance(kwds[kw], string_types) or
com.is_integer(kwds[kw])):
try:
kwds[kw] = data[kwds[kw]]
except (IndexError, KeyError, TypeError):
pass
data = series
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
df_kind = """- 'scatter' : scatter plot
- 'hexbin' : hexbin plot"""
series_kind = ""
df_coord = """x : label or position, default None
y : label or position, default None
Allows plotting of one column versus another"""
series_coord = ""
df_unique = """stacked : boolean, default False in line and
bar plots, and True in area plot. If True, create stacked plot.
sort_columns : boolean, default False
Sort column names to determine plot ordering
secondary_y : boolean or sequence, default False
Whether to plot on the secondary y-axis
If a list/tuple, which columns to plot on secondary y-axis"""
series_unique = """label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right"""
df_ax = """ax : matplotlib axes object, default None
subplots : boolean, default False
Make separate subplots for each column
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
        is passed in; be aware that passing in both an ax and sharex=True
        will alter all x axis labels for all axes in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
layout : tuple (optional)
(rows, columns) for the layout of subplots"""
series_ax = """ax : matplotlib axes object
If not passed, uses gca()"""
df_note = """- If `kind` = 'scatter' and the argument `c` is the name of a dataframe
column, the values of that column are used to color each point.
- If `kind` = 'hexbin', you can control the size of the bins with the
`gridsize` argument. By default, a histogram of the counts around each
`(x, y)` point is computed. You can specify alternative aggregations
by passing values to the `C` and `reduce_C_function` arguments.
`C` specifies the value at each `(x, y)` point and `reduce_C_function`
is a function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`)."""
series_note = ""
_shared_doc_df_kwargs = dict(klass='DataFrame', klass_kind=df_kind,
klass_coord=df_coord, klass_ax=df_ax,
klass_unique=df_unique, klass_note=df_note)
_shared_doc_series_kwargs = dict(klass='Series', klass_kind=series_kind,
klass_coord=series_coord, klass_ax=series_ax,
klass_unique=series_unique,
klass_note=series_note)
_shared_docs['plot'] = """
Make plots of %(klass)s using matplotlib / pylab.
Parameters
----------
data : %(klass)s
%(klass_coord)s
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
%(klass_kind)s
%(klass_ax)s
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string
Title to use for the plot
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
layout : tuple (optional)
(rows, columns) for the layout of the plot
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for detail.
xerr : same types as yerr.
%(klass_unique)s
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
%(klass_note)s
"""
@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs)
def plot_frame(data, x=None, y=None, kind='line', ax=None, # Dataframe unique
subplots=False, sharex=None, sharey=False, layout=None, # Dataframe unique
figsize=None, use_index=True, title=None, grid=None,
legend=True, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
secondary_y=False, sort_columns=False, # Dataframe unique
**kwds):
return _plot(data, kind=kind, x=x, y=y, ax=ax,
subplots=subplots, sharex=sharex, sharey=sharey,
layout=layout, figsize=figsize, use_index=use_index,
title=title, grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
secondary_y=secondary_y, sort_columns=sort_columns,
**kwds)
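# Illustrative sketch only (not part of the module's public surface): a direct
# exercise of the `plot_frame` entry point defined above. The frame, its column
# names and the chosen kinds are arbitrary assumptions made for the example.
def _example_plot_frame():
    import matplotlib.pyplot as plt
    import numpy as np
    from pandas import DataFrame
    df = DataFrame(np.random.randn(20, 3), columns=['A', 'B', 'C'])
    ax = plot_frame(df, kind='line')                  # dispatches to LinePlot
    axes = plot_frame(df, kind='bar', subplots=True)  # one BarPlot axes per column
    plt.close('all')
    return ax, axes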
@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs)
def plot_series(data, kind='line', ax=None, # Series unique
figsize=None, use_index=True, title=None, grid=None,
legend=False, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
label=None, secondary_y=False, # Series unique
**kwds):
import matplotlib.pyplot as plt
"""
    If no axes is specified, check whether there are existing figures.
    If there are no existing figures, _gca() will create a figure with the
    default figsize, causing the figsize parameter to be ignored.
"""
if ax is None and len(plt.get_fignums()) > 0:
ax = _gca()
ax = MPLPlot._get_ax_layer(ax)
return _plot(data, kind=kind, ax=ax,
figsize=figsize, use_index=use_index, title=title,
grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
label=label, secondary_y=secondary_y,
**kwds)
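# Illustrative sketch only: the Series-level entry point defined above, first
# on a fresh axes and then reusing that axes for a second call. The data is an
# arbitrary assumption for the example.
def _example_plot_series():
    import matplotlib.pyplot as plt
    import numpy as np
    from pandas import Series
    s = Series(np.random.randn(50)).cumsum()
    ax = plot_series(s, kind='line', title='cumulative sum')
    plot_series(s * 2, ax=ax, style='r--')  # draws on the same axes
    plt.close('all')
    return ax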
_shared_docs['boxplot'] = """
Make a box plot from DataFrame column optionally grouped by some columns or
other inputs
Parameters
----------
data : the pandas object holding the data
column : column name or list of names, or vector
Can be any valid input to groupby
by : string or sequence
Column in the DataFrame to group by
ax : Matplotlib axes object, optional
fontsize : int or string
rot : label rotation angle
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
layout : tuple (optional)
(rows, columns) for the layout of the plot
return_type : {'axes', 'dict', 'both'}, default 'dict'
The kind of object to return. 'dict' returns a dictionary
whose values are the matplotlib Lines of the boxplot;
'axes' returns the matplotlib axes the boxplot is drawn on;
'both' returns a namedtuple with the axes and dict.
When grouping with ``by``, a dict mapping columns to ``return_type``
is returned.
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
Returns
-------
lines : dict
ax : matplotlib Axes
(ax, lines): namedtuple
Notes
-----
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
"""
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
rot=0, grid=True, figsize=None, layout=None, return_type=None,
**kwds):
# validate return_type:
if return_type not in BoxPlot._valid_return_types:
raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}")
from pandas import Series, DataFrame
if isinstance(data, Series):
data = DataFrame({'x': data})
column = 'x'
def _get_colors():
return _get_standard_colors(color=kwds.get('color'), num_colors=1)
def maybe_color_bp(bp):
if 'color' not in kwds:
from matplotlib.artist import setp
setp(bp['boxes'], color=colors[0], alpha=1)
setp(bp['whiskers'], color=colors[0], alpha=1)
setp(bp['medians'], color=colors[2], alpha=1)
def plot_group(keys, values, ax):
keys = [com.pprint_thing(x) for x in keys]
values = [remove_na(v) for v in values]
bp = ax.boxplot(values, **kwds)
if kwds.get('vert', 1):
ax.set_xticklabels(keys, rotation=rot, fontsize=fontsize)
else:
ax.set_yticklabels(keys, rotation=rot, fontsize=fontsize)
maybe_color_bp(bp)
# Return axes in multiplot case, maybe revisit later # 985
if return_type == 'dict':
return bp
elif return_type == 'both':
return BoxPlot.BP(ax=ax, lines=bp)
else:
return ax
colors = _get_colors()
if column is None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
result = _grouped_plot_by_column(plot_group, data, columns=columns,
by=by, grid=grid, figsize=figsize,
ax=ax, layout=layout,
return_type=return_type)
else:
if layout is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
if return_type is None:
msg = ("\nThe default value for 'return_type' will change to "
"'axes' in a future release.\n To use the future behavior "
"now, set return_type='axes'.\n To keep the previous "
"behavior and silence this warning, set "
"return_type='dict'.")
warnings.warn(msg, FutureWarning)
return_type = 'dict'
if ax is None:
ax = _gca()
data = data._get_numeric_data()
if columns is None:
columns = data.columns
else:
data = data[columns]
result = plot_group(columns, data.values.T, ax)
ax.grid(grid)
return result
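# Illustrative sketch only: the module-level `boxplot` defined above, grouping
# one column by another. Column and group names are arbitrary assumptions.
def _example_boxplot():
    import matplotlib.pyplot as plt
    import numpy as np
    from pandas import DataFrame
    df = DataFrame({'value': np.random.randn(30),
                    'group': ['a', 'b', 'c'] * 10})
    # with ``by`` given, one box is drawn per group of 'value';
    # return_type='axes' asks for matplotlib axes rather than the line dict
    result = boxplot(df, column='value', by='group', return_type='axes')
    plt.close('all')
    return result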
def format_date_labels(ax, rot):
# mini version of autofmt_xdate
try:
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(rot)
fig = ax.get_figure()
fig.subplots_adjust(bottom=0.2)
except Exception: # pragma: no cover
pass
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,
**kwargs):
"""
Make a scatter plot from two DataFrame columns
Parameters
----------
data : DataFrame
x : Column name for the x-axis values
y : Column name for the y-axis values
ax : Matplotlib axis object
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
Returns
-------
fig : matplotlib.Figure
"""
import matplotlib.pyplot as plt
    # workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwargs.setdefault('c', plt.rcParams['patch.facecolor'])
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals, **kwargs)
ax.grid(grid)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(com.pprint_thing(y))
ax.set_xlabel(com.pprint_thing(x))
ax.grid(grid)
return fig
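# Illustrative sketch only: the `scatter_plot` helper defined above, once as a
# single panel and once grouped. The frame and its column names are assumptions.
def _example_scatter_plot():
    import matplotlib.pyplot as plt
    import numpy as np
    from pandas import DataFrame
    df = DataFrame({'x': np.random.randn(40),
                    'y': np.random.randn(40),
                    'label': ['one', 'two'] * 20})
    fig = scatter_plot(df, 'x', 'y', grid=True)       # single panel
    grouped = scatter_plot(df, 'x', 'y', by='label')  # one panel per label
    plt.close('all')
    return fig, grouped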
def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
"""
Draw histogram of the DataFrame's series using matplotlib / pylab.
Parameters
----------
data : DataFrame
column : string or sequence
If passed, will be used to limit data to a subset of columns
by : object, optional
If passed, then used to form histograms for separate groups
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
ax : matplotlib axes object, default None
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
        is passed in; be aware that passing in both an ax and sharex=True
will alter all x axis labels for all subplots in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
figsize : tuple
The size of the figure to create in inches by default
layout: (optional) a tuple (rows, columns) for the layout of the histograms
bins: integer, default 10
Number of histogram bins to be used
kwds : other plotting keyword arguments
To be passed to hist function
"""
if by is not None:
axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize,
sharex=sharex, sharey=sharey, layout=layout, bins=bins,
xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot,
**kwds)
return axes
if column is not None:
if not isinstance(column, (list, np.ndarray, Index)):
column = [column]
data = data[column]
data = data._get_numeric_data()
naxes = len(data.columns)
fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
sharex=sharex, sharey=sharey, figsize=figsize,
layout=layout)
_axes = _flatten(axes)
for i, col in enumerate(com._try_sort(data.columns)):
ax = _axes[i]
ax.hist(data[col].dropna().values, bins=bins, **kwds)
ax.set_title(col)
ax.grid(grid)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(wspace=0.3, hspace=0.3)
return axes
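# Illustrative sketch only: `hist_frame` defined above on a throwaway frame,
# with an explicit bin count and layout. All names here are assumptions.
def _example_hist_frame():
    import matplotlib.pyplot as plt
    import numpy as np
    from pandas import DataFrame
    df = DataFrame(np.random.randn(100, 2), columns=['A', 'B'])
    axes = hist_frame(df, bins=20, layout=(1, 2), figsize=(8, 3))
    plt.close('all')
    return axes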
def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, figsize=None, bins=10, **kwds):
"""
Draw histogram of the input series using matplotlib
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups
ax : matplotlib axis object
If not passed, uses gca()
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
figsize : tuple, default None
figure size in inches by default
bins: integer, default 10
Number of histogram bins to be used
kwds : keywords
To be passed to the actual plotting function
Notes
-----
See matplotlib documentation online for more on this
"""
import matplotlib.pyplot as plt
if by is None:
if kwds.get('layout', None) is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
# hack until the plotting interface is a bit more unified
fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else
plt.figure(figsize=figsize))
if (figsize is not None and tuple(figsize) !=
tuple(fig.get_size_inches())):
fig.set_size_inches(*figsize, forward=True)
if ax is None:
ax = fig.gca()
elif ax.get_figure() != fig:
raise AssertionError('passed axis not bound to passed figure')
values = self.dropna().values
ax.hist(values, bins=bins, **kwds)
ax.grid(grid)
axes = np.array([ax])
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
else:
if 'figure' in kwds:
raise ValueError("Cannot pass 'figure' when using the "
"'by' argument, since a new 'Figure' instance "
"will be created")
axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize, bins=bins,
xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot,
**kwds)
if axes.ndim == 1 and len(axes) == 1:
return axes[0]
return axes
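# Illustrative sketch only: `hist_series` defined above is written to be bound
# as a Series method, but it can equally be called with a Series as its first
# argument. The data is an arbitrary assumption.
def _example_hist_series():
    import matplotlib.pyplot as plt
    import numpy as np
    from pandas import Series
    s = Series(np.random.randn(200))
    ax = hist_series(s, bins=30, grid=False)
    plt.close('all')
    return ax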
def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,
layout=None, sharex=False, sharey=False, rot=90, grid=True,
xlabelsize=None, xrot=None, ylabelsize=None, yrot=None,
**kwargs):
"""
Grouped histogram
Parameters
----------
data: Series/DataFrame
column: object, optional
by: object, optional
ax: axes, optional
bins: int, default 50
figsize: tuple, optional
layout: optional
sharex: boolean, default False
sharey: boolean, default False
rot: int, default 90
grid: bool, default True
kwargs: dict, keyword arguments passed to matplotlib.Axes.hist
Returns
-------
axes: collection of Matplotlib Axes
"""
def plot_group(group, ax):
ax.hist(group.dropna().values, bins=bins, **kwargs)
xrot = xrot or rot
fig, axes = _grouped_plot(plot_group, data, column=column,
by=by, sharex=sharex, sharey=sharey, ax=ax,
figsize=figsize, layout=layout, rot=rot)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,
hspace=0.5, wspace=0.3)
return axes
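# Illustrative sketch only: `grouped_hist` defined above drawing one histogram
# panel per group key. Column and group names are assumptions for the example.
def _example_grouped_hist():
    import matplotlib.pyplot as plt
    import numpy as np
    from pandas import DataFrame
    df = DataFrame({'val': np.random.randn(60),
                    'grp': ['x', 'y', 'z'] * 20})
    axes = grouped_hist(df, column='val', by='grp', bins=15)
    plt.close('all')
    return axes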
def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
rot=0, grid=True, ax=None, figsize=None,
layout=None, **kwds):
"""
Make box plots from DataFrameGroupBy data.
Parameters
----------
grouped : Grouped DataFrame
subplots :
* ``False`` - no subplots will be used
* ``True`` - create a subplot for each group
column : column name or list of names, or vector
Can be any valid input to groupby
fontsize : int or string
rot : label rotation angle
grid : Setting this to True will show the grid
ax : Matplotlib axis object, default None
figsize : A tuple (width, height) in inches
layout : tuple (optional)
(rows, columns) for the layout of the plot
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
Returns
-------
dict of key/value = group key/DataFrame.boxplot return value
or DataFrame.boxplot return value in case subplots=figures=False
Examples
--------
>>> import pandas
>>> import numpy as np
>>> import itertools
>>>
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
>>> index = pandas.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
>>> data = np.random.randn(len(index),4)
>>> df = pandas.DataFrame(data, columns=list('ABCD'), index=index)
>>>
>>> grouped = df.groupby(level='lvl1')
>>> boxplot_frame_groupby(grouped)
>>>
>>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)
>>> boxplot_frame_groupby(grouped, subplots=False)
"""
if subplots is True:
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, squeeze=False,
ax=ax, sharex=False, sharey=True, figsize=figsize,
layout=layout)
axes = _flatten(axes)
ret = compat.OrderedDict()
for (key, group), ax in zip(grouped, axes):
d = group.boxplot(ax=ax, column=column, fontsize=fontsize,
rot=rot, grid=grid, **kwds)
ax.set_title(com.pprint_thing(key))
ret[key] = d
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
else:
from pandas.tools.merge import concat
keys, frames = zip(*grouped)
if grouped.axis == 0:
df = concat(frames, keys=keys, axis=1)
else:
if len(frames) > 1:
df = frames[0].join(frames[1::])
else:
df = frames[0]
ret = df.boxplot(column=column, fontsize=fontsize, rot=rot,
grid=grid, ax=ax, figsize=figsize, layout=layout, **kwds)
return ret
def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
figsize=None, sharex=True, sharey=True, layout=None,
rot=0, ax=None, **kwargs):
from pandas import DataFrame
if figsize == 'default':
# allowed to specify mpl default with 'default'
        warnings.warn("figsize='default' is deprecated. Specify figure "
                      "size by tuple instead", FutureWarning)
figsize = None
grouped = data.groupby(by)
if column is not None:
grouped = grouped[column]
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, figsize=figsize,
sharex=sharex, sharey=sharey, ax=ax,
layout=layout)
_axes = _flatten(axes)
for i, (key, group) in enumerate(grouped):
ax = _axes[i]
if numeric_only and isinstance(group, DataFrame):
group = group._get_numeric_data()
plotf(group, ax, **kwargs)
ax.set_title(com.pprint_thing(key))
return fig, axes
def _grouped_plot_by_column(plotf, data, columns=None, by=None,
numeric_only=True, grid=False,
figsize=None, ax=None, layout=None, return_type=None,
**kwargs):
grouped = data.groupby(by)
if columns is None:
if not isinstance(by, (list, tuple)):
by = [by]
columns = data._get_numeric_data().columns.difference(by)
naxes = len(columns)
fig, axes = _subplots(naxes=naxes, sharex=True, sharey=True,
figsize=figsize, ax=ax, layout=layout)
_axes = _flatten(axes)
result = compat.OrderedDict()
for i, col in enumerate(columns):
ax = _axes[i]
gp_col = grouped[col]
keys, values = zip(*gp_col)
re_plotf = plotf(keys, values, ax, **kwargs)
ax.set_title(col)
ax.set_xlabel(com.pprint_thing(by))
result[col] = re_plotf
ax.grid(grid)
# Return axes in multiplot case, maybe revisit later # 985
if return_type is None:
result = axes
byline = by[0] if len(by) == 1 else by
fig.suptitle('Boxplot grouped by %s' % byline)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
return result
def table(ax, data, rowLabels=None, colLabels=None,
**kwargs):
"""
Helper function to convert DataFrame and Series to matplotlib.table
Parameters
----------
`ax`: Matplotlib axes object
`data`: DataFrame or Series
data for table contents
`kwargs`: keywords, optional
keyword arguments which passed to matplotlib.table.table.
If `rowLabels` or `colLabels` is not specified, data index or column name will be used.
Returns
-------
matplotlib table object
"""
from pandas import DataFrame
if isinstance(data, Series):
data = DataFrame(data, columns=[data.name])
elif isinstance(data, DataFrame):
pass
else:
raise ValueError('Input data must be DataFrame or Series')
if rowLabels is None:
rowLabels = data.index
if colLabels is None:
colLabels = data.columns
cellText = data.values
import matplotlib.table
table = matplotlib.table.table(ax, cellText=cellText,
rowLabels=rowLabels, colLabels=colLabels, **kwargs)
return table
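# Illustrative sketch only: the `table` helper defined above rendering a small
# frame onto an axes; extra keywords such as ``loc`` are forwarded to
# matplotlib.table.table. The frame used here is an assumption.
def _example_table():
    import matplotlib.pyplot as plt
    import numpy as np
    from pandas import DataFrame
    df = DataFrame(np.arange(6).reshape(3, 2), columns=['A', 'B'])
    fig, ax = plt.subplots()
    ax.set_axis_off()                  # hide the axes frame so only the table shows
    tbl = table(ax, df, loc='center')
    plt.close(fig)
    return tbl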
def _get_layout(nplots, layout=None, layout_type='box'):
if layout is not None:
if not isinstance(layout, (tuple, list)) or len(layout) != 2:
raise ValueError('Layout must be a tuple of (rows, columns)')
nrows, ncols = layout
# Python 2 compat
ceil_ = lambda x: int(ceil(x))
if nrows == -1 and ncols >0:
layout = nrows, ncols = (ceil_(float(nplots) / ncols), ncols)
elif ncols == -1 and nrows > 0:
layout = nrows, ncols = (nrows, ceil_(float(nplots) / nrows))
elif ncols <= 0 and nrows <= 0:
msg = "At least one dimension of layout must be positive"
raise ValueError(msg)
if nrows * ncols < nplots:
raise ValueError('Layout of %sx%s must be larger than required size %s' %
(nrows, ncols, nplots))
return layout
if layout_type == 'single':
return (1, 1)
elif layout_type == 'horizontal':
return (1, nplots)
elif layout_type == 'vertical':
return (nplots, 1)
layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)}
try:
return layouts[nplots]
except KeyError:
k = 1
while k ** 2 < nplots:
k += 1
if (k - 1) * k >= nplots:
return k, (k - 1)
else:
return k, k
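# Illustrative sketch only: how `_get_layout` above resolves grid shapes,
# including the -1 convention for "infer this dimension from nplots".
def _example_get_layout():
    assert _get_layout(3) == (2, 2)                           # from the lookup table
    assert _get_layout(5, layout_type='horizontal') == (1, 5)
    assert _get_layout(5, layout=(-1, 2)) == (3, 2)           # nrows inferred
    assert _get_layout(5, layout=(2, -1)) == (2, 3)           # ncols inferred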
# copied from matplotlib/pyplot.py and modified for pandas.plotting
def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, ax=None, layout=None, layout_type='box', **fig_kw):
"""Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
naxes : int
Number of required axes. Exceeded axes are set invisible. Default is
nrows * ncols.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharey : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
      - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
        array of Axis objects.
      - for NxM subplots with N>1 and M>1, the returned object is a 2-d array
        of Axis objects.
If False, no squeezing at all is done: the returned axis object is always
a 2-d array containing Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
ax : Matplotlib axis object, optional
layout : tuple
Number of rows and columns of the subplot grid.
If not specified, calculated from naxes and layout_type
    layout_type : {'box', 'horizontal', 'vertical'}, default 'box'
Specify how to layout the subplot grid.
fig_kw : Other keyword arguments to be passed to the figure() call.
Note that all keywords not recognized above will be
automatically included here.
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one subplot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
if com.is_list_like(ax):
ax = _flatten(ax)
if layout is not None:
warnings.warn("When passing multiple axes, layout keyword is ignored", UserWarning)
if sharex or sharey:
                warnings.warn("When passing multiple axes, sharex and sharey are ignored. "
                              "These settings must be specified when creating axes", UserWarning)
if len(ax) == naxes:
fig = ax[0].get_figure()
return fig, ax
else:
raise ValueError("The number of passed axes must be {0}, the same as "
"the output plot".format(naxes))
fig = ax.get_figure()
        # if ax is passed and the number of subplots is 1, return ax as it is
if naxes == 1:
if squeeze:
return fig, ax
else:
return fig, _flatten(ax)
else:
warnings.warn("To output multiple subplots, the figure containing the passed axes "
"is being cleared", UserWarning)
fig.clear()
nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)
nplots = nrows * ncols
# Create empty object array to hold all axes. It's easiest to make it 1-d
    # so we can just append subplots upon creation, and then reshape it at the end
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw['sharex'] = ax0
if sharey:
subplot_kw['sharey'] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
kwds = subplot_kw.copy()
# Set sharex and sharey to None for blank/dummy axes, these can
# interfere with proper axis limits on the visible axes if
# they share axes e.g. issue #7528
if i >= naxes:
kwds['sharex'] = None
kwds['sharey'] = None
ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)
axarr[i] = ax
if naxes != nplots:
for ax in axarr[naxes:]:
ax.set_visible(False)
_handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots == 1:
axes = axarr[0]
else:
axes = axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
axes = axarr.reshape(nrows, ncols)
return fig, axes
def _remove_xlabels_from_axis(ax):
for label in ax.get_xticklabels():
label.set_visible(False)
try:
# set_visible will not be effective if
        # minor axis has NullLocator and NullFormatter (default)
import matplotlib.ticker as ticker
if isinstance(ax.xaxis.get_minor_locator(), ticker.NullLocator):
ax.xaxis.set_minor_locator(ticker.AutoLocator())
if isinstance(ax.xaxis.get_minor_formatter(), ticker.NullFormatter):
ax.xaxis.set_minor_formatter(ticker.FormatStrFormatter(''))
for label in ax.get_xticklabels(minor=True):
label.set_visible(False)
except Exception: # pragma no cover
pass
ax.xaxis.get_label().set_visible(False)
def _remove_ylables_from_axis(ax):
for label in ax.get_yticklabels():
label.set_visible(False)
try:
import matplotlib.ticker as ticker
if isinstance(ax.yaxis.get_minor_locator(), ticker.NullLocator):
ax.yaxis.set_minor_locator(ticker.AutoLocator())
if isinstance(ax.yaxis.get_minor_formatter(), ticker.NullFormatter):
ax.yaxis.set_minor_formatter(ticker.FormatStrFormatter(''))
for label in ax.get_yticklabels(minor=True):
label.set_visible(False)
except Exception: # pragma no cover
pass
ax.yaxis.get_label().set_visible(False)
def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
if nplots > 1:
        # first find out the ax layout, so that we can correctly handle "gaps"
layout = np.zeros((nrows+1,ncols+1), dtype=np.bool)
for ax in axarr:
layout[ax.rowNum, ax.colNum] = ax.get_visible()
if sharex and nrows > 1:
for ax in axarr:
                # only the last row of subplots should get x labels -> turn all others off;
                # layout handles the case where the subplot is the last one in its column,
                # because there is no subplot/gap below it.
if not layout[ax.rowNum+1, ax.colNum]:
continue
_remove_xlabels_from_axis(ax)
if sharey and ncols > 1:
for ax in axarr:
                # only the first column should get y labels -> turn all others off;
                # as we only have labels in the first column and we always have a subplot there,
                # we can skip the layout test
if ax.is_first_col():
continue
_remove_ylables_from_axis(ax)
def _flatten(axes):
if not com.is_list_like(axes):
return np.array([axes])
elif isinstance(axes, (np.ndarray, Index)):
return axes.ravel()
return np.array(axes)
def _get_all_lines(ax):
lines = ax.get_lines()
if hasattr(ax, 'right_ax'):
lines += ax.right_ax.get_lines()
if hasattr(ax, 'left_ax'):
lines += ax.left_ax.get_lines()
return lines
def _get_xlim(lines):
left, right = np.inf, -np.inf
for l in lines:
x = l.get_xdata(orig=False)
left = min(x[0], left)
right = max(x[-1], right)
return left, right
def _set_ticks_props(axes, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None):
import matplotlib.pyplot as plt
for ax in _flatten(axes):
if xlabelsize is not None:
plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
if xrot is not None:
plt.setp(ax.get_xticklabels(), rotation=xrot)
if ylabelsize is not None:
plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
if yrot is not None:
plt.setp(ax.get_yticklabels(), rotation=yrot)
return axes
if __name__ == '__main__':
# import pandas.rpy.common as com
# sales = com.load_data('sanfrancisco.home.sales', package='nutshell')
# top10 = sales['zip'].value_counts()[:10].index
# sales2 = sales[sales.zip.isin(top10)]
# _ = scatter_plot(sales2, 'squarefeet', 'price', by='zip')
# plt.show()
import matplotlib.pyplot as plt
import pandas.tools.plotting as plots
import pandas.core.frame as fr
reload(plots)
reload(fr)
from pandas.core.frame import DataFrame
data = DataFrame([[3, 6, -5], [4, 8, 2], [4, 9, -6],
[4, 9, -3], [2, 5, -1]],
columns=['A', 'B', 'C'])
data.plot(kind='barh', stacked=True)
plt.show()
| mit |
robbymeals/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
selva86/python-machine-learning | regression/finance_regression.py | 2 | 2098 | #!/usr/bin/python
"""
starter code for the regression mini-project
loads up/formats a modified version of the dataset
(why modified? we've removed some trouble points
    that you'll find yourself removing in the outliers mini-project)
draws a little scatterplot of the training/testing data
you fill in the regression code where indicated
"""
import sys
import pickle
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
dictionary = pickle.load( open("../final_project/final_project_dataset_modified.pkl", "r") )
### list the features you want to look at--first item in the
### list will be the "target" feature
features_list = ["bonus", "salary"]
data = featureFormat( dictionary, features_list, remove_any_zeroes=True)
target, features = targetFeatureSplit( data )
### training-testing split needed in regression, just like classification
from sklearn.cross_validation import train_test_split
feature_train, feature_test, target_train, target_test = train_test_split(features, target, test_size=0.5, random_state=42)
train_color = "b"
test_color = "b"
### Your regression goes here!
### Please name it reg, so that the plotting code below picks it up and
### plots it correctly. Don't forget to change the test_color from "b" to "r"
### to differentiate training points from test points.
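### A minimal sketch of one way to fill this in: an ordinary least squares
### fit from sklearn, named reg so the plotting code below picks it up.
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(feature_train, target_train)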
### draw the scatterplot, with color-coded training and testing points
import matplotlib.pyplot as plt
for feature, target in zip(feature_test, target_test):
plt.scatter( feature, target, color=test_color )
for feature, target in zip(feature_train, target_train):
plt.scatter( feature, target, color=train_color )
### labels for the legend
plt.scatter(feature_test[0], target_test[0], color=test_color, label="test")
plt.scatter(feature_test[0], target_test[0], color=train_color, label="train")
### draw the regression line, once it's coded
try:
plt.plot( feature_test, reg.predict(feature_test) )
except NameError:
pass
plt.xlabel(features_list[1])
plt.ylabel(features_list[0])
plt.legend()
plt.show()
| mit |
massmutual/scikit-learn | sklearn/tree/tests/test_tree.py | 8 | 48235 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import ignore_warnings
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
                            err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth here.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
    # Check that tree estimators are picklable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Gain testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous
    # construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y )
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
| bsd-3-clause |
mdbartos/RIPS | temporary/trans_line_parser.py | 1 | 1847 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
d = {}
for i in range (2001, 2011):
d.update({i : pd.read_excel('schedule6_%s.xls' % (i), skiprows=6)})
c = pd.concat([i[['NERC Region', 'Design (kV)', 'Size (MCM)', 'Material', 'Bundling Arrangement']] for i in d.values()]).dropna()
c['NERC Region'] = c['NERC Region'].str.strip()
c['Material'] = c['Material'].str.replace('^ACSR.+', 'ACSR').str.replace('^ACSS.+', 'ACSS').str.replace('^CU.+', 'CU').replace('AA', 'AAC').replace('AL', 'AAC').replace('Copper', 'CU').replace('Aluminum', 'AAC')
c = c[c['Material'].isin(['ACSR', 'ACSS', 'AAC', 'CU'])]
c['Design (kV)'] = c['Design (kV)'].astype(str).str.replace('kV', '').str.replace('*', '').str.strip().replace('100-120', '115').replace('200-299', '230').replace('151-199', '161').replace('400-599', '500').replace('121-150', '138').replace('300-399', '345').replace('161/115', '161').replace('500/230', '500').astype(float)
c = c[c['Design (kV)'].isin([230, 115, 345, 500, 138, 161])]
c = c[c['Size (MCM)'].apply(np.isreal)]
c['Size (MCM)'] = c['Size (MCM)'].astype(float)
c = c[(c['Size (MCM)'] < 9999) & (c['Size (MCM)'] > 0)]
c['Bundling Arrangement'] = c['Bundling Arrangement'].str.replace('.*ingle', '1.0').str.replace('.*ouble', '2.0').str.replace('.*riple', '3.0').str.replace('per phase', '').str.strip()
c['Bundling Arrangement'] = c['Bundling Arrangement'].str.replace('1.0', '1').str.replace('2.0', '2').str.replace('3.0', '3')
c = c.dropna()
c = c[c['Bundling Arrangement'].str.isdigit()]
c['Size (MCM)'] = c['Size (MCM)']*c['Bundling Arrangement'].astype(float)
s = c.groupby(['NERC Region', 'Design (kV)', 'Material'])['Size (MCM)']
#s.value_counts()
plt.boxplot([c[c['Design (kV)'] == i]['Size (MCM)'].values for i in sorted(c['Design (kV)'].unique().tolist())],
            labels=sorted(c['Design (kV)'].unique().tolist()))
plt.show()
| mit |
a-doumoulakis/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 37 | 3774 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers."""
# Create three fully connected layers respectively of size 10, 20, and 10.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Convert the labels to a one-hot tensor of shape (length of features, 3) and
# with a on-value of 1 for each one-hot vector of length 3.
onehot_labels = tf.one_hot(labels, 3, 1, 0)
# Compute loss.
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
# Create training op with exponentially decaying learning rate.
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
dmlc/xgboost | python-package/xgboost/training.py | 1 | 21199 | # coding: utf-8
# pylint: disable=too-many-locals, too-many-arguments, invalid-name
# pylint: disable=too-many-branches, too-many-statements
"""Training Library containing training routines."""
import warnings
import copy
import numpy as np
from .core import Booster, XGBoostError, _get_booster_layer_trees
from .compat import (SKLEARN_INSTALLED, XGBStratifiedKFold)
from . import callback
def _configure_deprecated_callbacks(
verbose_eval, early_stopping_rounds, maximize, start_iteration,
num_boost_round, feval, evals_result, callbacks, show_stdv, cvfolds):
link = 'https://xgboost.readthedocs.io/en/latest/python/callbacks.html'
warnings.warn(f'Old style callback is deprecated. See: {link}', UserWarning)
# Most of legacy advanced options becomes callbacks
if early_stopping_rounds is not None:
callbacks.append(callback.early_stop(early_stopping_rounds,
maximize=maximize,
verbose=bool(verbose_eval)))
if isinstance(verbose_eval, bool) and verbose_eval:
callbacks.append(callback.print_evaluation(show_stdv=show_stdv))
else:
if isinstance(verbose_eval, int):
callbacks.append(callback.print_evaluation(verbose_eval,
show_stdv=show_stdv))
if evals_result is not None:
callbacks.append(callback.record_evaluation(evals_result))
callbacks = callback.LegacyCallbacks(
callbacks, start_iteration, num_boost_round, feval, cvfolds=cvfolds)
return callbacks
def _is_new_callback(callbacks):
return any(isinstance(c, callback.TrainingCallback)
for c in callbacks) or not callbacks
def _train_internal(params, dtrain,
num_boost_round=10, evals=(),
obj=None, feval=None,
xgb_model=None, callbacks=None,
evals_result=None, maximize=None,
verbose_eval=None, early_stopping_rounds=None):
"""internal training function"""
callbacks = [] if callbacks is None else copy.copy(callbacks)
evals = list(evals)
bst = Booster(params, [dtrain] + [d[0] for d in evals])
if xgb_model is not None:
bst = Booster(params, [dtrain] + [d[0] for d in evals],
model_file=xgb_model)
start_iteration = 0
is_new_callback = _is_new_callback(callbacks)
if is_new_callback:
assert all(isinstance(c, callback.TrainingCallback)
for c in callbacks), "You can't mix new and old callback styles."
if verbose_eval:
verbose_eval = 1 if verbose_eval is True else verbose_eval
callbacks.append(callback.EvaluationMonitor(period=verbose_eval))
if early_stopping_rounds:
callbacks.append(callback.EarlyStopping(
rounds=early_stopping_rounds, maximize=maximize))
callbacks = callback.CallbackContainer(callbacks, metric=feval)
else:
callbacks = _configure_deprecated_callbacks(
verbose_eval, early_stopping_rounds, maximize, start_iteration,
num_boost_round, feval, evals_result, callbacks,
show_stdv=False, cvfolds=None)
bst = callbacks.before_training(bst)
for i in range(start_iteration, num_boost_round):
if callbacks.before_iteration(bst, i, dtrain, evals):
break
bst.update(dtrain, i, obj)
if callbacks.after_iteration(bst, i, dtrain, evals):
break
bst = callbacks.after_training(bst)
if evals_result is not None and is_new_callback:
evals_result.update(callbacks.history)
# These should be moved into callback functions `after_training`, but until old
# callbacks are removed, the train function is the only place for setting the
# attributes.
num_parallel_tree, _ = _get_booster_layer_trees(bst)
if bst.attr('best_score') is not None:
bst.best_score = float(bst.attr('best_score'))
bst.best_iteration = int(bst.attr('best_iteration'))
# num_class is handled internally
bst.set_attr(
best_ntree_limit=str((bst.best_iteration + 1) * num_parallel_tree)
)
bst.best_ntree_limit = int(bst.attr("best_ntree_limit"))
else:
# Due to compatibility with version older than 1.4, these attributes are added
# to Python object even if early stopping is not used.
bst.best_iteration = bst.num_boosted_rounds() - 1
bst.best_ntree_limit = (bst.best_iteration + 1) * num_parallel_tree
# Copy to serialise and unserialise booster to reset state and free
# training memory
return bst.copy()
def train(params, dtrain, num_boost_round=10, evals=(), obj=None, feval=None,
maximize=None, early_stopping_rounds=None, evals_result=None,
verbose_eval=True, xgb_model=None, callbacks=None):
# pylint: disable=too-many-statements,too-many-branches, attribute-defined-outside-init
"""Train a booster with given parameters.
Parameters
----------
params : dict
Booster params.
dtrain : DMatrix
Data to be trained.
num_boost_round: int
Number of boosting iterations.
evals: list of pairs (DMatrix, string)
        List of validation sets for which metrics will be evaluated during training.
Validation metrics will help us track the performance of the model.
obj : function
Customized objective function.
feval : function
Customized evaluation function.
maximize : bool
Whether to maximize feval.
early_stopping_rounds: int
Activates early stopping. Validation metric needs to improve at least once in
every **early_stopping_rounds** round(s) to continue training.
Requires at least one item in **evals**.
The method returns the model from the last iteration (not the best one). Use
custom callback or model slicing if the best model is desired.
If there's more than one item in **evals**, the last entry will be used for early
stopping.
If there's more than one metric in the **eval_metric** parameter given in
**params**, the last metric will be used for early stopping.
        If early stopping occurs, the model will have three additional fields:
        ``bst.best_score``, ``bst.best_iteration`` and ``bst.best_ntree_limit``.
evals_result: dict
This dictionary stores the evaluation results of all the items in watchlist.
Example: with a watchlist containing
``[(dtest,'eval'), (dtrain,'train')]`` and
a parameter containing ``('eval_metric': 'logloss')``,
the **evals_result** returns
.. code-block:: python
{'train': {'logloss': ['0.48253', '0.35953']},
'eval': {'logloss': ['0.480385', '0.357756']}}
verbose_eval : bool or int
Requires at least one item in **evals**.
If **verbose_eval** is True then the evaluation metric on the validation set is
printed at each boosting stage.
If **verbose_eval** is an integer then the evaluation metric on the validation set
is printed at every given **verbose_eval** boosting stage. The last boosting stage
/ the boosting stage found by using **early_stopping_rounds** is also printed.
Example: with ``verbose_eval=4`` and at least one item in **evals**, an evaluation metric
is printed every 4 boosting stages, instead of every boosting stage.
xgb_model : file name of stored xgb model or 'Booster' instance
Xgb model to be loaded before training (allows training continuation).
callbacks : list of callback functions
List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using
:ref:`Callback API <callback_api>`.
Example:
.. code-block:: python
[xgb.callback.LearningRateScheduler(custom_rates)]
Returns
-------
Booster : a trained booster model
"""
bst = _train_internal(params, dtrain,
num_boost_round=num_boost_round,
evals=evals,
obj=obj, feval=feval,
xgb_model=xgb_model, callbacks=callbacks,
verbose_eval=verbose_eval,
evals_result=evals_result,
maximize=maximize,
early_stopping_rounds=early_stopping_rounds)
return bst
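# A hedged usage sketch (illustration only, not part of the library API): it shows
# how the pieces documented above fit together -- a train/validation watchlist,
# early stopping, and capturing evals_result. The synthetic data and parameter
# values are assumptions chosen purely for demonstration; the helper is never
# called by the library itself.
def _train_usage_example():
    """Tiny end-to-end demonstration of :py:func:`train`."""
    import numpy as np
    from .core import DMatrix
    rng = np.random.RandomState(0)
    X, y = rng.randn(100, 5), rng.randint(0, 2, 100)
    dtrain = DMatrix(X[:80], label=y[:80])
    dvalid = DMatrix(X[80:], label=y[80:])
    history = {}
    booster = train(
        {'objective': 'binary:logistic', 'eta': 0.3, 'max_depth': 3},
        dtrain,
        num_boost_round=50,
        evals=[(dtrain, 'train'), (dvalid, 'validation')],
        early_stopping_rounds=5,
        evals_result=history,
        verbose_eval=False,
    )
    return booster, history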
class CVPack(object):
    """Auxiliary data structure to hold one fold of CV."""
    def __init__(self, dtrain, dtest, param):
        """Initialize the CVPack."""
self.dtrain = dtrain
self.dtest = dtest
self.watchlist = [(dtrain, 'train'), (dtest, 'test')]
self.bst = Booster(param, [dtrain, dtest])
def __getattr__(self, name):
def _inner(*args, **kwargs):
return getattr(self.bst, name)(*args, **kwargs)
return _inner
    def update(self, iteration, fobj):
        """Update the boosters for one iteration."""
self.bst.update(self.dtrain, iteration, fobj)
    def eval(self, iteration, feval):
        """Evaluate the CVPack for one iteration."""
return self.bst.eval_set(self.watchlist, iteration, feval)
class _PackedBooster:
def __init__(self, cvfolds) -> None:
self.cvfolds = cvfolds
def update(self, iteration, obj):
'''Iterate through folds for update'''
for fold in self.cvfolds:
fold.update(iteration, obj)
def eval(self, iteration, feval):
'''Iterate through folds for eval'''
result = [f.eval(iteration, feval) for f in self.cvfolds]
return result
def set_attr(self, **kwargs):
'''Iterate through folds for setting attributes'''
for f in self.cvfolds:
f.bst.set_attr(**kwargs)
def attr(self, key):
'''Redirect to booster attr.'''
return self.cvfolds[0].bst.attr(key)
def set_param(self, params, value=None):
"""Iterate through folds for set_param"""
for f in self.cvfolds:
f.bst.set_param(params, value)
def num_boosted_rounds(self):
'''Number of boosted rounds.'''
return self.cvfolds[0].num_boosted_rounds()
@property
def best_iteration(self):
'''Get best_iteration'''
return int(self.cvfolds[0].bst.attr("best_iteration"))
@property
def best_score(self):
"""Get best_score."""
return float(self.cvfolds[0].bst.attr("best_score"))
def groups_to_rows(groups, boundaries):
"""
    Given group row boundaries, convert group indexes to row indexes
:param groups: list of groups for testing
:param boundaries: rows index limits of each group
:return: row in group
"""
return np.concatenate([np.arange(boundaries[g], boundaries[g+1]) for g in groups])
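# Worked illustration (made-up numbers): with boundaries = [0, 2, 5, 7] -- three
# groups of sizes 2, 3 and 2 -- groups_to_rows([0, 2], boundaries) concatenates
# np.arange(0, 2) with np.arange(5, 7) and returns rows [0, 1, 5, 6].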
def mkgroupfold(dall, nfold, param, evals=(), fpreproc=None, shuffle=True):
"""
Make n folds for cross-validation maintaining groups
:return: cross-validation folds
"""
# we have groups for pairwise ranking... get a list of the group indexes
group_boundaries = dall.get_uint_info('group_ptr')
group_sizes = np.diff(group_boundaries)
if shuffle is True:
idx = np.random.permutation(len(group_sizes))
else:
idx = np.arange(len(group_sizes))
# list by fold of test group indexes
out_group_idset = np.array_split(idx, nfold)
# list by fold of train group indexes
in_group_idset = [np.concatenate([out_group_idset[i] for i in range(nfold) if k != i])
for k in range(nfold)]
# from the group indexes, convert them to row indexes
in_idset = [groups_to_rows(in_groups, group_boundaries) for in_groups in in_group_idset]
out_idset = [groups_to_rows(out_groups, group_boundaries) for out_groups in out_group_idset]
# build the folds by taking the appropriate slices
ret = []
for k in range(nfold):
# perform the slicing using the indexes determined by the above methods
dtrain = dall.slice(in_idset[k], allow_groups=True)
dtrain.set_group(group_sizes[in_group_idset[k]])
dtest = dall.slice(out_idset[k], allow_groups=True)
dtest.set_group(group_sizes[out_group_idset[k]])
# run preprocessing on the data set if needed
if fpreproc is not None:
dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())
else:
tparam = param
plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals]
ret.append(CVPack(dtrain, dtest, plst))
return ret
def mknfold(dall, nfold, param, seed, evals=(), fpreproc=None, stratified=False,
folds=None, shuffle=True):
"""
Make an n-fold list of CVPack from random indices.
"""
evals = list(evals)
np.random.seed(seed)
if stratified is False and folds is None:
# Do standard k-fold cross validation. Automatically determine the folds.
if len(dall.get_uint_info('group_ptr')) > 1:
return mkgroupfold(dall, nfold, param, evals=evals, fpreproc=fpreproc, shuffle=shuffle)
if shuffle is True:
idx = np.random.permutation(dall.num_row())
else:
idx = np.arange(dall.num_row())
out_idset = np.array_split(idx, nfold)
in_idset = [np.concatenate([out_idset[i] for i in range(nfold) if k != i])
for k in range(nfold)]
elif folds is not None:
# Use user specified custom split using indices
try:
in_idset = [x[0] for x in folds]
out_idset = [x[1] for x in folds]
except TypeError:
# Custom stratification using Sklearn KFoldSplit object
splits = list(folds.split(X=dall.get_label(), y=dall.get_label()))
in_idset = [x[0] for x in splits]
out_idset = [x[1] for x in splits]
nfold = len(out_idset)
else:
        # Do standard stratified shuffle k-fold split
sfk = XGBStratifiedKFold(n_splits=nfold, shuffle=True, random_state=seed)
splits = list(sfk.split(X=dall.get_label(), y=dall.get_label()))
in_idset = [x[0] for x in splits]
out_idset = [x[1] for x in splits]
nfold = len(out_idset)
ret = []
for k in range(nfold):
# perform the slicing using the indexes determined by the above methods
dtrain = dall.slice(in_idset[k])
dtest = dall.slice(out_idset[k])
# run preprocessing on the data set if needed
if fpreproc is not None:
dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())
else:
tparam = param
plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals]
ret.append(CVPack(dtrain, dtest, plst))
return ret
def cv(params, dtrain, num_boost_round=10, nfold=3, stratified=False, folds=None,
metrics=(), obj=None, feval=None, maximize=None, early_stopping_rounds=None,
fpreproc=None, as_pandas=True, verbose_eval=None, show_stdv=True,
seed=0, callbacks=None, shuffle=True):
# pylint: disable = invalid-name
"""Cross-validation with given parameters.
Parameters
----------
params : dict
Booster params.
dtrain : DMatrix
Data to be trained.
num_boost_round : int
Number of boosting iterations.
nfold : int
Number of folds in CV.
stratified : bool
Perform stratified sampling.
folds : a KFold or StratifiedKFold instance or list of fold indices
Sklearn KFolds or StratifiedKFolds object.
Alternatively may explicitly pass sample indices for each fold.
For ``n`` folds, **folds** should be a length ``n`` list of tuples.
Each tuple is ``(in,out)`` where ``in`` is a list of indices to be used
as the training samples for the ``n`` th fold and ``out`` is a list of
indices to be used as the testing samples for the ``n`` th fold.
metrics : string or list of strings
Evaluation metrics to be watched in CV.
obj : function
Custom objective function.
feval : function
Custom evaluation function.
maximize : bool
Whether to maximize feval.
early_stopping_rounds: int
Activates early stopping. Cross-Validation metric (average of validation
metric computed over CV folds) needs to improve at least once in
every **early_stopping_rounds** round(s) to continue training.
The last entry in the evaluation history will represent the best iteration.
If there's more than one metric in the **eval_metric** parameter given in
**params**, the last metric will be used for early stopping.
fpreproc : function
Preprocessing function that takes (dtrain, dtest, param) and returns
transformed versions of those.
    as_pandas : bool, default True
        Return pd.DataFrame when pandas is installed.
        If False or pandas is not installed, return a dict of evaluation
        history lists instead.
verbose_eval : bool, int, or None, default None
Whether to display the progress. If None, progress will be displayed
when np.ndarray is returned. If True, progress will be displayed at
boosting stage. If an integer is given, progress will be displayed
at every given `verbose_eval` boosting stage.
show_stdv : bool, default True
Whether to display the standard deviation in progress.
        Results are not affected, and always contain std.
seed : int
Seed used to generate the folds (passed to numpy.random.seed).
callbacks : list of callback functions
List of callback functions that are applied at end of each iteration.
It is possible to use predefined callbacks by using
:ref:`Callback API <callback_api>`.
Example:
.. code-block:: python
[xgb.callback.LearningRateScheduler(custom_rates)]
shuffle : bool
Shuffle data before creating folds.
Returns
-------
    evaluation history : dict of lists, or pandas.DataFrame when ``as_pandas`` is True
"""
if stratified is True and not SKLEARN_INSTALLED:
raise XGBoostError('sklearn needs to be installed in order to use stratified cv')
if isinstance(metrics, str):
metrics = [metrics]
if isinstance(params, list):
_metrics = [x[1] for x in params if x[0] == 'eval_metric']
params = dict(params)
if 'eval_metric' in params:
params['eval_metric'] = _metrics
else:
params = dict((k, v) for k, v in params.items())
if (not metrics) and 'eval_metric' in params:
if isinstance(params['eval_metric'], list):
metrics = params['eval_metric']
else:
metrics = [params['eval_metric']]
params.pop("eval_metric", None)
results = {}
cvfolds = mknfold(dtrain, nfold, params, seed, metrics, fpreproc,
stratified, folds, shuffle)
# setup callbacks
callbacks = [] if callbacks is None else callbacks
is_new_callback = _is_new_callback(callbacks)
if is_new_callback:
assert all(isinstance(c, callback.TrainingCallback)
for c in callbacks), "You can't mix new and old callback styles."
if isinstance(verbose_eval, bool) and verbose_eval:
verbose_eval = 1 if verbose_eval is True else verbose_eval
callbacks.append(callback.EvaluationMonitor(period=verbose_eval,
show_stdv=show_stdv))
if early_stopping_rounds:
callbacks.append(callback.EarlyStopping(
rounds=early_stopping_rounds, maximize=maximize))
callbacks = callback.CallbackContainer(callbacks, metric=feval, is_cv=True)
else:
callbacks = _configure_deprecated_callbacks(
verbose_eval, early_stopping_rounds, maximize, 0,
num_boost_round, feval, None, callbacks,
show_stdv=show_stdv, cvfolds=cvfolds)
booster = _PackedBooster(cvfolds)
callbacks.before_training(booster)
for i in range(num_boost_round):
if callbacks.before_iteration(booster, i, dtrain, None):
break
booster.update(i, obj)
should_break = callbacks.after_iteration(booster, i, dtrain, None)
res = callbacks.aggregated_cv
for key, mean, std in res:
if key + '-mean' not in results:
results[key + '-mean'] = []
if key + '-std' not in results:
results[key + '-std'] = []
results[key + '-mean'].append(mean)
results[key + '-std'].append(std)
if should_break:
for k in results.keys(): # pylint: disable=consider-iterating-dictionary
results[k] = results[k][:(booster.best_iteration + 1)]
break
if as_pandas:
try:
import pandas as pd
results = pd.DataFrame.from_dict(results)
except ImportError:
pass
callbacks.after_training(booster)
return results
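# --- Usage sketch (editor's addition, not part of the library code) ---
# A minimal, illustrative call to ``cv`` through the public API; the random
# data, parameter values and metric below are assumptions chosen only for
# demonstration. ``folds`` could instead be a sklearn KFold instance or an
# explicit list of (train_index, test_index) tuples, as documented above.
#
#     import numpy as np
#     import xgboost as xgb
#
#     X = np.random.rand(200, 5)
#     y = np.random.randint(2, size=200)
#     dtrain = xgb.DMatrix(X, label=y)
#     history = xgb.cv({'objective': 'binary:logistic', 'max_depth': 3},
#                      dtrain, num_boost_round=50, nfold=5, metrics='auc',
#                      early_stopping_rounds=10, seed=0)
#     print(history)  # one row per boosting round: train/test auc mean and std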
| apache-2.0 |
SHornung1/AliPhysics | PWGPP/FieldParam/fitsol.py | 39 | 8343 | #!/usr/bin/env python
debug = True # enable trace
def trace(x):
global debug
if debug: print(x)
trace("loading...")
from itertools import combinations, combinations_with_replacement
from glob import glob
from math import *
import operator
from functools import reduce  # needed by product() below on Python 3; harmless on Python 2
from os.path import basename
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
import sklearn.feature_selection
import datetime
def prec_from_pathname(path):
if '2k' in path: return 0.002
elif '5k' in path: return 0.005
    else: raise AssertionError('Unknown field strength: %s' % path)
# ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...]
def combinatrial_vars(vars_str='xyz', length=3):
term_list = []
for l in range(length):
term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)])
return term_list
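# e.g. (editor's note) combinatrial_vars('xy', 2) -> ['x', 'y', 'xx', 'xy', 'yy']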
# product :: a#* => [a] -> a
def product(xs):
return reduce(operator.mul, xs, 1) # foldl in Haskell
# (XYZ, "xx") -> XX
def term(dataframe, vars_str):
return product(map(lambda x: dataframe[x], list(vars_str)))
# (f(X), Y) -> (max deviation, max%, avg dev, avg%)
def deviation_stat(fX, Y, prec=0.005):
dev = np.abs(fX - Y)
(max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0))
(max_pct, avg_pct) = (max_dev / prec * 100, avg_dev / prec * 100)
return (max_dev, max_pct, avg_dev, avg_pct)
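# e.g. (editor's note): deviation_stat(np.array([1.0, 2.0]), np.array([1.1, 2.0]), prec=0.1)
# returns approximately (0.1, 100.0, 0.05, 50.0), i.e. max/avg deviation and the same as a % of prec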
# IO Df
def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]):
sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz']
df = pd.read_csv(path, sep=' ', names=sample_cols)
if cylindrical_axis:
df['r'] = np.sqrt(df.x**2 + df.y**2)
df['p'] = np.arctan2(df.y, df.x)
df['Bt'] = np.sqrt(df.Bx**2 + df.By**2)
df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x)
df['Br'] = df.Bt * np.cos(df.Bpsi)
df['Bp'] = df.Bt * np.sin(df.Bpsi)
if absolute_axis:
df['X'] = np.abs(df.x)
df['Y'] = np.abs(df.y)
df['Z'] = np.abs(df.z)
for var in genvars:
df[var] = term(df, var)
return df
def choose(vars, df1, df2):
X1 = df1.loc[:, vars].as_matrix()
X2 = df2.loc[:, vars].as_matrix()
return (X1, X2)
# IO ()
def run_analysis_for_all_fields():
sample_set = glob("dat_z22/*2k*.sample.dat")
test_set = glob("dat_z22/*2k*.test.dat")
#print(sample_set, test_set)
assert(len(sample_set) == len(test_set) and len(sample_set) > 0)
result = pd.DataFrame()
for i, sample_file in enumerate(sample_set):
trace("run_analysis('%s', '%s')" % (sample_file, test_set[i]))
df = run_analysis(sample_file, test_set[i])
result = result.append(df, ignore_index=True)
write_header(result)
def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat',
test_file = 'dat_z22/tpc2k-z0-q2.test.dat'):
global precision, df, test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result
precision = prec_from_pathname(sample_file)
assert(precision == prec_from_pathname(test_file))
    xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y, z, up to 3 dims
trace("reading training samples... " + sample_file)
df = load_samples(sample_file, genvars=xvars_full)
trace("reading test samples..." + test_file)
test = load_samples(test_file, genvars=xvars_full)
trace("linear regression fit...")
lr = sklearn.linear_model.LinearRegression()
#ri = sklearn.linear_model.RidgeCV()
#la = sklearn.linear_model.LassoCV()
fs = sklearn.feature_selection.RFE(lr, 1, verbose=0)
#xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz']
#xvars = ["xx", "yy", "zz", 'x', 'y', 'z', 'xzz', 'yzz']
#xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr']
#xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz']
yvars = ['Bx', 'By', 'Bz']
#yvars = ['Bz']
(Y, Ytest) = choose(yvars, df, test)
#(Y, Ytest) = (df['Bz'], test['Bz'])
    xvars = combinatrial_vars('xyz', 3) # use all terms up to 3rd power
(X, Xtest) = choose(xvars, df, test)
for y in yvars:
fs.fit(X, df[y])
res = pd.DataFrame({ "term": xvars, "rank": fs.ranking_ })
trace(y)
trace(res.sort_values(by = "rank"))
#xvars=list(res.sort_values(by="rank")[:26]['term'])
lr.fit(X, Y)
trace(', '.join(yvars) + " = 1 + " + ' + '.join(xvars))
test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#for i in range(len(yvars)):
# arr = [lr.intercept_[i]] + lr.coef_[i]
# arr = [ str(x) for x in arr ]
# print(yvars[i] + " = { " + ', '.join(arr) + " }")
# print("deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
# ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] ))
(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
trace("linear regression R^2 [train data]: %.8f" % sample_score)
trace("linear regression R^2 [test data] : %.8f" % test_score)
return pd.DataFrame(
{ "xvars": [xvars],
"yvars": [yvars],
"max_dev": [test_dev[0]],
"max%": [test_dev[1]],
"avg_dev": [test_dev[2]],
"avg%": [test_dev[3]],
"sample_score": [sample_score],
"score": [test_score],
"coeffs": [lr.coef_],
"intercept": [lr.intercept_],
"sample_file": [sample_file],
"test_file": [test_file],
"precision": [precision],
"volume_id": [volume_id_from_path(sample_file)]
})
def volume_id_from_path(path):
return basename(path)\
.replace('.sample.dat', '')\
.replace('-', '_')
def get_location_by_volume_id(id):
if 'its' in id: r_bin = 0
if 'tpc' in id: r_bin = 1
if 'tof' in id: r_bin = 2
if 'tofext' in id: r_bin = 3
if 'cal' in id: r_bin = 4
z_bin = int(id.split('_')[1][1:]) # "tofext2k_z0_q4" -> 0
if 'q1' in id: quadrant = 0
if 'q2' in id: quadrant = 1
if 'q3' in id: quadrant = 2
if 'q4' in id: quadrant = 3
return r_bin, z_bin, quadrant
def write_header(result):
#result.to_csv("magfield_params.csv")
#result.to_html("magfield_params.html")
print("# This file was generated from sysid.py at " + str(datetime.datetime.today()))
print("# " + ', '.join(result.iloc[0].yvars) + " = 1 + " + ' + '.join(result.iloc[0].xvars))
print("# barrel r: 0 < its < 80 < tpc < 250 < tof < 400 < tofext < 423 < cal < 500")
print("# barrel z: -550 < z < 550")
print("# phi: 0 < q1 < 0.5pi < q2 < pi < q3 < 1.5pi < q4 < 2pi")
print("# header: Rbin Zbin Quadrant Nval_per_compoment(=20)")
print("# data: Nval_per_compoment x floats")
#print("# R^2: coefficient of determination in multiple linear regression. [0,1]")
print("")
for index, row in result.iterrows():
#print("// ** %s - R^2 %s" % (row.volume_id, row.score))
print("#" + row.volume_id)
r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id)
print("%s %s %s 20" % (r_bin, z_bin, quadrant))
for i, yvar in enumerate(row.yvars):
name = row.volume_id #+ '_' + yvar.lower()
print("# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
(row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i]))
coef = [row['intercept'][i]] + list(row['coeffs'][i])
arr = [ "%.5e" % x for x in coef ]
body = ' '.join(arr)
#decl = "const double[] %s = { %s };\n" % (name, body)
#print(decl)
print(body)
print("")
#write_header(run_analysis())
run_analysis_for_all_fields()
#for i in range(10):
# for xvars in combinations(xvars_full, i+1):
#(X, Xtest) = choose(xvars, df, test)
#lr.fit(X, Y)
#ri.fit(X, Y)
#la.fit(X, Y)
#fs.fit(X, Y)
#print xvars
#(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
#print("linear R^2[sample] %.8f" % sample_score)
#print("linear R^2[test] %.8f" % test_score)
#(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest))
#print("lasso R^2[sample] %.8f" % sample_score2)
#print("lasso R^2[test] %.8f" % test_score2)
#print(la.coef_)
#for i in range(len(yvars)):
# print(yvars[i])
# print(pd.DataFrame({"Name": xvars, "Params": lr.coef_[i]}).sort_values(by='Params'))
# print("+ %e" % lr.intercept_[i])
#sample_dev = deviation_stat(lr.predict(X), Y, prec=precision)
#test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision)
#print("[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % sample_dev)
#print("[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev )
#print("lasso [test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev2 )
| bsd-3-clause |
niltonlk/nest-simulator | pynest/examples/spatial/grid_iaf_irr.py | 20 | 1453 | # -*- coding: utf-8 -*-
#
# grid_iaf_irr.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Create 12 freely placed iaf_psc_alpha neurons
-----------------------------------------------
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
nest.ResetKernel()
pos = nest.spatial.free([nest.random.uniform(-0.75, 0.75), nest.random.uniform(-0.5, 0.5)], extent=[2., 1.5])
l1 = nest.Create('iaf_psc_alpha', 12, positions=pos)
nest.PrintNodes()
nest.PlotLayer(l1, nodesize=50)
# beautify
plt.axis([-1.0, 1.0, -0.75, 0.75])
plt.axes().set_aspect('equal', 'box')
plt.axes().set_xticks((-0.75, -0.25, 0.25, 0.75))
plt.axes().set_yticks((-0.5, 0, 0.5))
plt.grid(True)
plt.xlabel('Extent: 2.0')
plt.ylabel('Extent: 1.5')
plt.show()
# plt.savefig('grid_iaf_irr.png')
| gpl-2.0 |
AIML/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
LouisPlisso/analysis_tools | cdfplot-tools/cdfplot_1.2.py | 1 | 13138 | #!/usr/bin/env python
"Module to plot cdf from data or file. Can be called directly."
from optparse import OptionParser
import sys
import pylab
from matplotlib.font_manager import FontProperties
_VERSION = '1.2'
#TODO: possibility to place legend outside graph:
#pylab.subfigure(111)
#pylab.subplots_adjust(right=0.8) or (top=0.8)
#pylab.legend(loc=(1.1, 0.5)
#CCDF
def ccdfplotdataN(list_data_name, _xlabel = 'x',
_ylabel = r'1 - P(X$\leq$x)',
_title = 'Empirical Distribution',
_fs_legend='medium',
_fs = 'x-large', _loc=0):
"Plot the ccdf of a list of data arrays and names"
#corresponding line width with larger width for '-.' and ':'
if not list_data_name:
print >> sys.stderr, "no data to plot"
return
_ls = ['-', '-.', '--'] #, ':']
_lw = [1, 2, 3] #, 4]
_ls_len = len(_ls)
#plot all cdfs except last one
for i in range(len(list_data_name) - 1):
name, data = list_data_name[i]
#plot with round robin line style (ls)
#and increasing line width
(div, mod) = divmod(i, _ls_len)
ccdfplotdata(data, _name=name, _lw=_lw[mod]+3*div,
_ls=_ls[mod], _fs=_fs, _fs_legend=_fs_legend)
#for last cdf, we put the legend and names
(name, data) = list_data_name[-1]
(div, mod) = divmod(len(list_data_name), _ls_len)
ccdfplotdata(data, _name=name, _title=_title, _xlabel=_xlabel,
_ylabel=_ylabel, _lw=_lw[mod]+2*div, _ls=_ls[mod], _fs=_fs)
setgraph_logx(_loc=_loc)
def ccdfplotdata(data_in, _xlabel = 'x', _ylabel = r'1 - P(X$\leq$x)',
_title = 'Empirical Distribution',
_name = 'Data', _lw = 2, _fs = 'x-large', _fs_legend='medium',
_ls = '-', _loc=0):
"Plot the ccdf of a data array"
data = pylab.array(data_in, copy=True)
data.sort()
data_len = len(data)
ccdf = 1 - pylab.arange(data_len)/(data_len - 1.0)
pylab.plot(data, ccdf, 'k', lw = _lw, drawstyle = 'steps',
label = _name, ls = _ls)
pylab.xlabel(_xlabel, size = _fs)
pylab.ylabel(_ylabel, size = _fs)
pylab.title(_title, size = _fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
def ccdfplot(_file, col = 0, xlabel = 'X', ylabel = r'1 - P(X$\leq$x)',
title = 'Empirical Distribution', name = 'Data',
_lw = 2, _fs = 'x-large', _ls = '-', _loc=0):
"Plot the ccdf of a column in file"
data = pylab.loadtxt(_file, usecols = [col])
ccdfplotdata(data, _xlabel = xlabel, _ylabel = ylabel,
_title = title, _name = name,
_lw = _lw, _fs = _fs, _ls = _ls, _loc = _loc)
#CDF
def cdfplotdataN(list_data_name, _xlabel = 'x', _ylabel = r'P(X$\leq$x)',
_title = 'Empirical Distribution', _fs = 'x-large',
_fs_legend='medium', _loc = 0, do_color=True, logx=True):
"Plot the cdf of a list of names and data arrays"
#corresponding line width with larger width for '-.' and ':'
if not list_data_name:
print >> sys.stderr, "no data to plot"
return
_ls = ['-', '-.', '-', '--'] * 2 #, ':']
# _lw = [1, 1] + [2, 4, 2, 4, 2, 4]#, 4]
_lw = [2, 4] + [2, 4, 2, 4, 2, 4]#, 4]
assert len(_ls) == len(_lw)
# _colors = ['k', 'k', 'g', 'c', 'm', 'r', 'y', 'pink']
# consequent plots are same color
_colors = ['k', 'k', 'c', 'c', 'm', 'm', 'y', 'y']
for i in range(len(list_data_name)):# - 1):
name, data = list_data_name[i]
#plot with round robin line style (ls)
#and increasing line width
(div, mod) = divmod(i, len(_ls))
if not do_color:
color = 'k'
# line_width = _lw[mod]+2*div
else:
color = _colors[i % len(_colors)]
# line_width = 2 + div
line_width = _lw[mod]+2*div
cdfplotdata(data, _name=name, _title=_title, _xlabel=_xlabel,
_ylabel=_ylabel, _lw=line_width, _ls=_ls[mod], _fs=_fs,
_color=color)
if logx:
setgraph_logx(_loc=_loc, _fs_legend=_fs_legend)
else:
setgraph_lin(_loc=_loc, _fs_legend=_fs_legend)
# cdfplotdata(data, _name=name, _lw=line_width, _ls=_ls[mod],
# _fs=_fs, _color=color)
# for last cdf, we put the legend and names
# (data, name) = list_data_name[-1]
# (div, mod) = divmod(len(list_data_name), len(_ls))
# if not do_color:
# color = 'k'
# line_width = _lw[mod]+2*div
# else:
# color = _colors[i % len(_colors)]
# line_width = 1 + div
# cdfplotdata(data, _name=name, _title=_title, _xlabel=_xlabel,
# _ylabel=_ylabel, _lw=line_width, _ls=_ls[mod], _fs=_fs,
# _fs_legend=_fs_legend, _color=color)
def cdfplotdata(data_in, _color='k', _xlabel='x', _ylabel=r'P(X$\leq$x)',
_title='Empirical Distribution', _name='Data', _lw=2, _fs='x-large',
_fs_legend='medium', _ls = '-', _loc=0):
"Plot the cdf of a data array"
# data = pylab.array(data_in, copy=True)
data = sorted(data_in)
data_len = len(data)
cdf = pylab.arange(data_len+1)/(data_len - 0.0)
data.append(data[-1])
pylab.plot(data, cdf, _color, lw = _lw, drawstyle = 'steps',
label = _name, ls = _ls)
pylab.xlabel(_xlabel, size = _fs)
pylab.ylabel(_ylabel, size = _fs)
pylab.title(_title, size = _fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
def cdfplot(_file, col = 0, xlabel = 'X',
ylabel = r'P(X$\leq$x)',
title = 'Empirical Distribution', name = 'Data',
_lw = 2, _fs = 'x-large', _ls = '-', _loc=0):
"Plot the cdf of a column in file"
data = pylab.loadtxt(_file, usecols = [col])
cdfplotdata(data, _xlabel = xlabel, _ylabel = ylabel,
_title = title, _name = name,
_lw = _lw, _fs = _fs, _ls = _ls, _loc = _loc)
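# Usage sketch (editor's addition) for the plotting helpers above: cdfplotdataN
# expects a list of (name, data) pairs. The values below are illustrative only,
# and the module is assumed to be importable as ``cdfplot``.
#
#   import pylab
#   import cdfplot
#
#   a = pylab.rand(1000)        # uniform on [0, 1)
#   b = 2 * pylab.rand(1000)    # uniform on [0, 2)
#   cdfplot.cdfplotdataN([('uniform [0,1)', a), ('uniform [0,2)', b)],
#                        _xlabel='value', _title='Example CDFs', logx=False)
#   pylab.show()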
def setgraph_lin(_fs = 'x-large', _loc = 2, _fs_legend = 'medium'):
"Set graph in xlogscale and adjusts x&y markers"
pylab.grid(True)
_ax = pylab.gca()
for tick in _ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
for tick in _ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
def setgraph_logx(_fs = 'x-large', _loc = 2, _fs_legend = 'medium'):
"Set graph in xlogscale and adjusts x&y markers"
pylab.grid(True)
pylab.semilogx(nonposy='clip', nonposx='clip')
_ax = pylab.gca()
for tick in _ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
for tick in _ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
def setgraph_loglog(_fs = 'x-large', _loc = 2, _fs_legend = 'medium'):
"Set graph in xlogscale and adjusts x&y markers"
pylab.grid(True)
pylab.loglog(nonposy='clip', nonposx='clip')
_ax = pylab.gca()
for tick in _ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
for tick in _ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
def setgraph_logy(_fs = 'x-large', _loc = 2, _fs_legend = 'medium'):
"Set graph in xlogscale and adjusts x&y markers"
pylab.grid(True)
pylab.semilogy(nonposy='clip', nonposx='clip')
_ax = pylab.gca()
for tick in _ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
for tick in _ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(_fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
#repartition plots
def repartplotdataN(list_data_name, _xlabel = 'Rank',
_ylabel = 'Cumulative Percentage of Data',
_title = 'Repartition of values',
_fs = 'x-large', do_color=True, _loc=0, loglog=True):
"Plot the repartition of a list of data arrays and names"
#corresponding line width with larger width for '-.' and ':'
if not list_data_name:
print >> sys.stderr, "no data to plot"
return
_ls = ['-', '-.', '-', '--'] * 2 #, ':']
# _ls = ['-', '-.', '--', ':']
_lw = [2, 4] + [2, 4, 2, 4, 2, 4]#, 4]
# _lw = [1, 2, 3, 4]
assert len(_ls) == len(_lw)
_len_ls = len(_ls)
# consequent plots are same color
_colors = ['k', 'k', 'c', 'c', 'm', 'm', 'y', 'y']
for i in range(len(list_data_name)):# - 1):
name, data = list_data_name[i]
#plot with round robin line style (ls)
#and increasing line width
(div, mod) = divmod(i, _len_ls)
if not do_color:
color = 'k'
# line_width = _lw[mod]+2*div
else:
color = _colors[i % len(_colors)]
# line_width = 2 + div
line_width = _lw[mod]+2*div
repartplotdata(data, _name=name, _title=_title, _xlabel=_xlabel,
_ylabel=_ylabel, _lw=line_width, _ls=_ls[mod], _fs=_fs,
_color=color)
if loglog:
setgraph_loglog(_loc=_loc)
else:
setgraph_lin(_loc=_loc)
# #for last cdf, we put the legend and names
# (name, data) = list_data_name[-1]
# (div, mod) = divmod(len(list_data_name), _len_ls)
# repartplotdata(data, _name=name, _title=_title, _xlabel=_xlabel,
# _ylabel=_ylabel, _lw=_lw[mod]+2*div, _ls=_ls[mod], _fs=_fs)
# setgraph_loglog(_loc=_loc)
def repartplotdata(data_in, _color='k', _xlabel = 'Rank',
_ylabel = 'Cumulative Percentage of Data',
_title = 'Repartition of values', _name = 'Data', _lw = 2,
_fs = 'x-large', _fs_legend='medium', _ls = '-', _loc=0):
"Plot the repartition of a data array"
data = pylab.array(data_in, copy=True)
data.sort()
rank = pylab.arange(1, len(data) + 1)
values = pylab.cumsum(data[::-1])
pylab.plot(rank, 100 * values / values[-1], _color, lw = _lw,
drawstyle = 'steps', label = _name, ls = _ls)
pylab.xlabel(_xlabel, size = _fs)
pylab.ylabel(_ylabel, size = _fs)
pylab.title(_title, size = _fs)
font = FontProperties(size = _fs_legend)
pylab.legend(loc = _loc, prop = font)
def repartplot(_file, col = 0, xlabel = 'Rank',
ylabel = 'Cumulative Percentage of Data',
title = 'Repartition of values', name = 'Data',
_lw = 2, _fs = 'x-large', _ls = '-', _loc=0):
"Plot the cdf of a column in file"
data = pylab.loadtxt(_file, usecols = [col])
repartplotdata(data, _xlabel = xlabel, _ylabel = ylabel,
_title = title, _name = name,
_lw = _lw, _fs = _fs, _ls = _ls, _loc = _loc)
def main():
"Program wrapper."
usage = "%prog -r data_file [-c col -x x_label -y y_label -t title \
-n data_name -lw line_width -fs fontsize [-g|-p]]"
parser = OptionParser(usage = usage, version="%prog " + _VERSION)
parser.add_option("-r", dest = "file",
help = "input data file or stdin if FILE is -")
parser.add_option("-c", dest = "col", type = "int", default = 0,
help = "column in the file [default value = 0]")
parser.add_option("-x", dest = "xlabel", default = 'X',
help = "x label")
parser.add_option("-y", dest = "ylabel",
default = r'P(X$\leq$x)', help = "y label")
parser.add_option("-t", dest = "title",
default = 'Empirical Distribution',
help = "graph title")
parser.add_option("-n", dest = "name", default = 'Data',
help = "data name")
parser.add_option("-l", "--lw", dest = "lw", type = "int",
default = 2, help = "line width")
parser.add_option("-f", "--fs", dest = "fs", type = "int",
default = 18, help = "font size")
parser.add_option("-g", "--ccdf", dest = "g",
action="store_true", default=False,
help = "plot ccdf instead of cdf")
parser.add_option("-p", "--repartition", dest = "p",
action="store_true", default=False,
help = "plot repartition instead of cdf")
(options, _) = parser.parse_args()
if not options.file:
print "Must provide filename."
parser.print_help()
exit(1)
if options.file == '-':
out_file = sys.stdin
else:
try:
out_file = open(options.file, 'r')
except IOError:
print "File, %s, does not exist." % options.file
parser.print_help()
exit(1)
if options.g and options.p:
print "g and p options are exclusive."
parser.print_help()
exit(1)
pylab.clf()
if options.g:
ccdfplot(out_file, col=options.col, _lw=options.lw, _fs=options.fs)
elif options.p:
repartplot(out_file, col=options.col, _lw=options.lw, _fs=options.fs)
else:
cdfplot(out_file, col=options.col, xlabel=options.xlabel,
ylabel=options.ylabel, title=options.title,
name=options.name, _lw=options.lw, _fs=options.fs)
setgraph_logx(_fs = options.fs)
pylab.show()
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
hennersz/pySpace | basemap/examples/hexbin_demo.py | 4 | 2322 | # example showing how to plot scattered data with hexbin.
from numpy.random import uniform
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
# create north polar stereographic basemap
m = Basemap(lon_0=270, boundinglat=20, projection='npstere',round=True)
#m = Basemap(lon_0=-105,lat_0=40,projection='ortho')
# number of points, bins to plot.
npts = 10000
bins = 40
# generate random points on a sphere,
# so that every small area on the sphere is expected
# to have the same number of points.
# http://mathworld.wolfram.com/SpherePointPicking.html
u = uniform(0.,1.,size=npts)
v = uniform(0.,1.,size=npts)
lons = 360.*u
lats = (180./np.pi)*np.arccos(2*v-1) - 90.
# toss points outside of map region.
mask = lats > 20
lats = np.compress(mask, lats)
lons = np.compress(mask, lons)
# convert to map projection coordinates.
x1, y1 = m(lons, lats)
# remove points outside projection limb.
x = np.compress(np.logical_or(x1 < 1.e20,y1 < 1.e20), x1)
y = np.compress(np.logical_or(x1 < 1.e20,y1 < 1.e20), y1)
# function to plot at those points.
xscaled = 4.*(x-0.5*(m.xmax-m.xmin))/m.xmax
yscaled = 4.*(y-0.5*(m.ymax-m.ymin))/m.ymax
z = xscaled*np.exp(-xscaled**2-yscaled**2)
# make plot using hexbin
fig = plt.figure(figsize=(12,5))
ax = fig.add_subplot(121)
CS = m.hexbin(x,y,C=z,gridsize=bins,cmap=plt.cm.jet)
# draw coastlines, lat/lon lines.
m.drawcoastlines()
m.drawparallels(np.arange(0,81,20))
m.drawmeridians(np.arange(-180,181,60))
m.colorbar() # draw colorbar
plt.title('hexbin demo')
# use histogram2d instead of hexbin.
ax = fig.add_subplot(122)
# remove points outside projection limb.
bincount, xedges, yedges = np.histogram2d(x, y, bins=bins)
mask = bincount == 0
# reset zero values to one to avoid divide-by-zero
bincount = np.where(bincount == 0, 1, bincount)
H, xedges, yedges = np.histogram2d(x, y, bins=bins, weights=z)
H = np.ma.masked_where(mask, H/bincount)
# set color of masked values to axes background (hexbin does this by default)
palette = plt.cm.jet
palette.set_bad(ax.get_axis_bgcolor(), 1.0)
CS = m.pcolormesh(xedges,yedges,H.T,shading='flat',cmap=palette)
# draw coastlines, lat/lon lines.
m.drawcoastlines()
m.drawparallels(np.arange(0,81,20))
m.drawmeridians(np.arange(-180,181,60))
m.colorbar() # draw colorbar
plt.title('histogram2d demo')
plt.show()
| gpl-3.0 |
AnishShah/tensorflow | tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 24 | 6776 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support (deprecated)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
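# Illustrative sketch (editor's addition) of the ``<component>__<parameter>``
# convention handled by set_params above; ``Inner`` and ``Outer`` are
# hypothetical estimators defined only for this example.
#
#   class Inner(_BaseEstimator):
#     def __init__(self, alpha=1.0):
#       self.alpha = alpha
#
#   class Outer(_BaseEstimator):
#     def __init__(self, inner=None):
#       self.inner = inner
#
#   est = Outer(inner=Inner())
#   est.set_params(inner__alpha=0.5)     # routed to est.inner.alpha
#   est.get_params(deep=True)            # includes 'inner__alpha': 0.5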
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
USE OF THIS EXCEPTION IS DEPRECATED.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
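# Illustrative sketch (editor's addition): the naive split above follows the
# familiar sklearn calling convention; the array shapes are assumptions.
#
#   X = np.arange(20).reshape(10, 2)
#   y = np.arange(10)
#   X_train, X_test, y_train, y_test = _train_test_split(
#       X, y, test_size=0.2, random_state=0)
#   # X_train.shape == (8, 2), X_test.shape == (2, 2)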
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| apache-2.0 |
MTG/sms-tools | lectures/03-Fourier-properties/plots-code/symmetry.py | 26 | 1178 | import matplotlib.pyplot as plt
import numpy as np
import sys
from scipy.fftpack import fft, ifft, fftshift
import math
sys.path.append('../../../software/models/')
import utilFunctions as UF
import dftModel as DF
(fs, x) = UF.wavread('../../../sounds/soprano-E4.wav')
w = np.hamming(511)
N = 512
pin = 5000
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
fftbuffer = np.zeros(N)
x1 = x[pin-hM1:pin+hM2]
xw = x1*w
fftbuffer[:hM1] = xw[hM2:]
fftbuffer[N-hM2:] = xw[:hM2]
X = fftshift(fft(fftbuffer))
mX = 20 * np.log10(abs(X))
pX = np.unwrap(np.angle(X))
plt.figure(1, figsize=(9.5, 7))
plt.subplot(311)
plt.plot(np.arange(-hM1, hM2), x1, lw=1.5)
plt.axis([-hM1, hM2, min(x1), max(x1)])
plt.ylabel('amplitude')
plt.title('x (soprano-E4.wav)')
plt.subplot(312)
plt.plot(np.arange(-N/2,N/2), mX, 'r', lw=1.5)
plt.axis([-N/2,N/2,-48,max(mX)])
plt.title ('mX = 20*log10(abs(X))')
plt.ylabel('amplitude (dB)')
plt.subplot(313)
plt.plot(np.arange(-N/2,N/2), pX, 'c', lw=1.5)
plt.axis([-N/2,N/2,min(pX),max(pX)])
plt.title ('pX = unwrap(angle(X))')
plt.ylabel('phase (radians)')
plt.tight_layout()
plt.savefig('symmetry.png')
plt.show()
| agpl-3.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/tests/dtypes/test_common.py | 3 | 19448 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas.core.dtypes.dtypes import (DatetimeTZDtype, PeriodDtype,
CategoricalDtype, IntervalDtype)
import pandas.core.dtypes.common as com
import pandas.util.testing as tm
class TestPandasDtype(object):
# Passing invalid dtype, both as a string or object, must raise TypeError
# Per issue GH15520
def test_invalid_dtype_error(self):
msg = 'not understood'
invalid_list = [pd.Timestamp, 'pd.Timestamp', list]
for dtype in invalid_list:
with tm.assert_raises_regex(TypeError, msg):
com.pandas_dtype(dtype)
valid_list = [object, 'float64', np.object_, np.dtype('object'), 'O',
np.float64, float, np.dtype('float64')]
for dtype in valid_list:
com.pandas_dtype(dtype)
def test_numpy_dtype(self):
for dtype in ['M8[ns]', 'm8[ns]', 'object', 'float64', 'int64']:
assert com.pandas_dtype(dtype) == np.dtype(dtype)
def test_numpy_string_dtype(self):
# do not parse freq-like string as period dtype
assert com.pandas_dtype('U') == np.dtype('U')
assert com.pandas_dtype('S') == np.dtype('S')
def test_datetimetz_dtype(self):
for dtype in ['datetime64[ns, US/Eastern]',
'datetime64[ns, Asia/Tokyo]',
'datetime64[ns, UTC]']:
assert com.pandas_dtype(dtype) is DatetimeTZDtype(dtype)
assert com.pandas_dtype(dtype) == DatetimeTZDtype(dtype)
assert com.pandas_dtype(dtype) == dtype
def test_categorical_dtype(self):
assert com.pandas_dtype('category') == CategoricalDtype()
def test_period_dtype(self):
for dtype in ['period[D]', 'period[3M]', 'period[U]',
'Period[D]', 'Period[3M]', 'Period[U]']:
assert com.pandas_dtype(dtype) is PeriodDtype(dtype)
assert com.pandas_dtype(dtype) == PeriodDtype(dtype)
assert com.pandas_dtype(dtype) == dtype
dtypes = dict(datetime_tz=com.pandas_dtype('datetime64[ns, US/Eastern]'),
datetime=com.pandas_dtype('datetime64[ns]'),
timedelta=com.pandas_dtype('timedelta64[ns]'),
period=PeriodDtype('D'),
integer=np.dtype(np.int64),
float=np.dtype(np.float64),
object=np.dtype(np.object),
category=com.pandas_dtype('category'))
@pytest.mark.parametrize('name1,dtype1',
list(dtypes.items()),
ids=lambda x: str(x))
@pytest.mark.parametrize('name2,dtype2',
list(dtypes.items()),
ids=lambda x: str(x))
def test_dtype_equal(name1, dtype1, name2, dtype2):
# match equal to self, but not equal to other
assert com.is_dtype_equal(dtype1, dtype1)
if name1 != name2:
assert not com.is_dtype_equal(dtype1, dtype2)
def test_dtype_equal_strict():
# we are strict on kind equality
for dtype in [np.int8, np.int16, np.int32]:
assert not com.is_dtype_equal(np.int64, dtype)
for dtype in [np.float32]:
assert not com.is_dtype_equal(np.float64, dtype)
# strict w.r.t. PeriodDtype
assert not com.is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
# strict w.r.t. datetime64
assert not com.is_dtype_equal(
com.pandas_dtype('datetime64[ns, US/Eastern]'),
com.pandas_dtype('datetime64[ns, CET]'))
# see gh-15941: no exception should be raised
assert not com.is_dtype_equal(None, None)
def get_is_dtype_funcs():
"""
Get all functions in pandas.core.dtypes.common that
begin with 'is_' and end with 'dtype'
"""
fnames = [f for f in dir(com) if (f.startswith('is_') and
f.endswith('dtype'))]
return [getattr(com, fname) for fname in fnames]
@pytest.mark.parametrize('func',
get_is_dtype_funcs(),
ids=lambda x: x.__name__)
def test_get_dtype_error_catch(func):
# see gh-15941
#
# No exception should be raised.
assert not func(None)
def test_is_object():
assert com.is_object_dtype(object)
assert com.is_object_dtype(np.array([], dtype=object))
assert not com.is_object_dtype(int)
assert not com.is_object_dtype(np.array([], dtype=int))
assert not com.is_object_dtype([1, 2, 3])
def test_is_sparse():
assert com.is_sparse(pd.SparseArray([1, 2, 3]))
assert com.is_sparse(pd.SparseSeries([1, 2, 3]))
assert not com.is_sparse(np.array([1, 2, 3]))
# This test will only skip if the previous assertions
# pass AND scipy is not installed.
sparse = pytest.importorskip("scipy.sparse")
assert not com.is_sparse(sparse.bsr_matrix([1, 2, 3]))
def test_is_scipy_sparse():
tm._skip_if_no_scipy()
from scipy.sparse import bsr_matrix
assert com.is_scipy_sparse(bsr_matrix([1, 2, 3]))
assert not com.is_scipy_sparse(pd.SparseArray([1, 2, 3]))
assert not com.is_scipy_sparse(pd.SparseSeries([1, 2, 3]))
def test_is_categorical():
cat = pd.Categorical([1, 2, 3])
assert com.is_categorical(cat)
assert com.is_categorical(pd.Series(cat))
assert com.is_categorical(pd.CategoricalIndex([1, 2, 3]))
assert not com.is_categorical([1, 2, 3])
def test_is_datetimetz():
assert not com.is_datetimetz([1, 2, 3])
assert not com.is_datetimetz(pd.DatetimeIndex([1, 2, 3]))
assert com.is_datetimetz(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
dtype = DatetimeTZDtype("ns", tz="US/Eastern")
s = pd.Series([], dtype=dtype)
assert com.is_datetimetz(s)
def test_is_period():
assert not com.is_period([1, 2, 3])
assert not com.is_period(pd.Index([1, 2, 3]))
assert com.is_period(pd.PeriodIndex(["2017-01-01"], freq="D"))
def test_is_datetime64_dtype():
assert not com.is_datetime64_dtype(object)
assert not com.is_datetime64_dtype([1, 2, 3])
assert not com.is_datetime64_dtype(np.array([], dtype=int))
assert com.is_datetime64_dtype(np.datetime64)
assert com.is_datetime64_dtype(np.array([], dtype=np.datetime64))
def test_is_datetime64tz_dtype():
assert not com.is_datetime64tz_dtype(object)
assert not com.is_datetime64tz_dtype([1, 2, 3])
assert not com.is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3]))
assert com.is_datetime64tz_dtype(pd.DatetimeIndex(
[1, 2, 3], tz="US/Eastern"))
def test_is_timedelta64_dtype():
assert not com.is_timedelta64_dtype(object)
assert not com.is_timedelta64_dtype([1, 2, 3])
assert not com.is_timedelta64_dtype(np.array([], dtype=np.datetime64))
assert com.is_timedelta64_dtype(np.timedelta64)
assert com.is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]"))
assert not com.is_timedelta64_dtype("0 days 00:00:00")
def test_is_period_dtype():
assert not com.is_period_dtype(object)
assert not com.is_period_dtype([1, 2, 3])
assert not com.is_period_dtype(pd.Period("2017-01-01"))
assert com.is_period_dtype(PeriodDtype(freq="D"))
assert com.is_period_dtype(pd.PeriodIndex([], freq="A"))
def test_is_interval_dtype():
assert not com.is_interval_dtype(object)
assert not com.is_interval_dtype([1, 2, 3])
assert com.is_interval_dtype(IntervalDtype())
interval = pd.Interval(1, 2, closed="right")
assert not com.is_interval_dtype(interval)
assert com.is_interval_dtype(pd.IntervalIndex([interval]))
def test_is_categorical_dtype():
assert not com.is_categorical_dtype(object)
assert not com.is_categorical_dtype([1, 2, 3])
assert com.is_categorical_dtype(CategoricalDtype())
assert com.is_categorical_dtype(pd.Categorical([1, 2, 3]))
assert com.is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))
def test_is_string_dtype():
assert not com.is_string_dtype(int)
assert not com.is_string_dtype(pd.Series([1, 2]))
assert com.is_string_dtype(str)
assert com.is_string_dtype(object)
assert com.is_string_dtype(np.array(['a', 'b']))
def test_is_period_arraylike():
assert not com.is_period_arraylike([1, 2, 3])
assert not com.is_period_arraylike(pd.Index([1, 2, 3]))
assert com.is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
def test_is_datetime_arraylike():
assert not com.is_datetime_arraylike([1, 2, 3])
assert not com.is_datetime_arraylike(pd.Index([1, 2, 3]))
assert com.is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3]))
def test_is_datetimelike():
assert not com.is_datetimelike([1, 2, 3])
assert not com.is_datetimelike(pd.Index([1, 2, 3]))
assert com.is_datetimelike(pd.DatetimeIndex([1, 2, 3]))
assert com.is_datetimelike(pd.PeriodIndex([], freq="A"))
assert com.is_datetimelike(np.array([], dtype=np.datetime64))
assert com.is_datetimelike(pd.Series([], dtype="timedelta64[ns]"))
assert com.is_datetimelike(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
dtype = DatetimeTZDtype("ns", tz="US/Eastern")
s = pd.Series([], dtype=dtype)
assert com.is_datetimelike(s)
def test_is_integer_dtype():
assert not com.is_integer_dtype(str)
assert not com.is_integer_dtype(float)
assert not com.is_integer_dtype(np.datetime64)
assert not com.is_integer_dtype(np.timedelta64)
assert not com.is_integer_dtype(pd.Index([1, 2.]))
assert not com.is_integer_dtype(np.array(['a', 'b']))
assert not com.is_integer_dtype(np.array([], dtype=np.timedelta64))
assert com.is_integer_dtype(int)
assert com.is_integer_dtype(np.uint64)
assert com.is_integer_dtype(pd.Series([1, 2]))
def test_is_signed_integer_dtype():
assert not com.is_signed_integer_dtype(str)
assert not com.is_signed_integer_dtype(float)
assert not com.is_signed_integer_dtype(np.uint64)
assert not com.is_signed_integer_dtype(np.datetime64)
assert not com.is_signed_integer_dtype(np.timedelta64)
assert not com.is_signed_integer_dtype(pd.Index([1, 2.]))
assert not com.is_signed_integer_dtype(np.array(['a', 'b']))
assert not com.is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32))
assert not com.is_signed_integer_dtype(np.array([], dtype=np.timedelta64))
assert com.is_signed_integer_dtype(int)
assert com.is_signed_integer_dtype(pd.Series([1, 2]))
def test_is_unsigned_integer_dtype():
assert not com.is_unsigned_integer_dtype(str)
assert not com.is_unsigned_integer_dtype(int)
assert not com.is_unsigned_integer_dtype(float)
assert not com.is_unsigned_integer_dtype(pd.Series([1, 2]))
assert not com.is_unsigned_integer_dtype(pd.Index([1, 2.]))
assert not com.is_unsigned_integer_dtype(np.array(['a', 'b']))
assert com.is_unsigned_integer_dtype(np.uint64)
assert com.is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))
def test_is_int64_dtype():
assert not com.is_int64_dtype(str)
assert not com.is_int64_dtype(float)
assert not com.is_int64_dtype(np.int32)
assert not com.is_int64_dtype(np.uint64)
assert not com.is_int64_dtype(pd.Index([1, 2.]))
assert not com.is_int64_dtype(np.array(['a', 'b']))
assert not com.is_int64_dtype(np.array([1, 2], dtype=np.uint32))
assert com.is_int64_dtype(np.int64)
assert com.is_int64_dtype(np.array([1, 2], dtype=np.int64))
def test_is_int_or_datetime_dtype():
assert not com.is_int_or_datetime_dtype(str)
assert not com.is_int_or_datetime_dtype(float)
assert not com.is_int_or_datetime_dtype(pd.Index([1, 2.]))
assert not com.is_int_or_datetime_dtype(np.array(['a', 'b']))
assert com.is_int_or_datetime_dtype(int)
assert com.is_int_or_datetime_dtype(np.uint64)
assert com.is_int_or_datetime_dtype(np.datetime64)
assert com.is_int_or_datetime_dtype(np.timedelta64)
assert com.is_int_or_datetime_dtype(pd.Series([1, 2]))
assert com.is_int_or_datetime_dtype(np.array([], dtype=np.datetime64))
assert com.is_int_or_datetime_dtype(np.array([], dtype=np.timedelta64))
def test_is_datetime64_any_dtype():
assert not com.is_datetime64_any_dtype(int)
assert not com.is_datetime64_any_dtype(str)
assert not com.is_datetime64_any_dtype(np.array([1, 2]))
assert not com.is_datetime64_any_dtype(np.array(['a', 'b']))
assert com.is_datetime64_any_dtype(np.datetime64)
assert com.is_datetime64_any_dtype(np.array([], dtype=np.datetime64))
assert com.is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))
assert com.is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3],
dtype=np.datetime64))
def test_is_datetime64_ns_dtype():
assert not com.is_datetime64_ns_dtype(int)
assert not com.is_datetime64_ns_dtype(str)
assert not com.is_datetime64_ns_dtype(np.datetime64)
assert not com.is_datetime64_ns_dtype(np.array([1, 2]))
assert not com.is_datetime64_ns_dtype(np.array(['a', 'b']))
assert not com.is_datetime64_ns_dtype(np.array([], dtype=np.datetime64))
# This datetime array has the wrong unit (ps instead of ns)
assert not com.is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]"))
assert com.is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern"))
assert com.is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3],
dtype=np.datetime64))
def test_is_timedelta64_ns_dtype():
assert not com.is_timedelta64_ns_dtype(np.dtype('m8[ps]'))
assert not com.is_timedelta64_ns_dtype(
np.array([1, 2], dtype=np.timedelta64))
assert com.is_timedelta64_ns_dtype(np.dtype('m8[ns]'))
assert com.is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))
def test_is_datetime_or_timedelta_dtype():
assert not com.is_datetime_or_timedelta_dtype(int)
assert not com.is_datetime_or_timedelta_dtype(str)
assert not com.is_datetime_or_timedelta_dtype(pd.Series([1, 2]))
assert not com.is_datetime_or_timedelta_dtype(np.array(['a', 'b']))
assert com.is_datetime_or_timedelta_dtype(np.datetime64)
assert com.is_datetime_or_timedelta_dtype(np.timedelta64)
assert com.is_datetime_or_timedelta_dtype(
np.array([], dtype=np.timedelta64))
assert com.is_datetime_or_timedelta_dtype(
np.array([], dtype=np.datetime64))
def test_is_numeric_v_string_like():
assert not com.is_numeric_v_string_like(1, 1)
assert not com.is_numeric_v_string_like(1, "foo")
assert not com.is_numeric_v_string_like("foo", "foo")
assert not com.is_numeric_v_string_like(np.array([1]), np.array([2]))
assert not com.is_numeric_v_string_like(
np.array(["foo"]), np.array(["foo"]))
assert com.is_numeric_v_string_like(np.array([1]), "foo")
assert com.is_numeric_v_string_like("foo", np.array([1]))
assert com.is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))
assert com.is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
def test_is_datetimelike_v_numeric():
dt = np.datetime64(pd.datetime(2017, 1, 1))
assert not com.is_datetimelike_v_numeric(1, 1)
assert not com.is_datetimelike_v_numeric(dt, dt)
assert not com.is_datetimelike_v_numeric(np.array([1]), np.array([2]))
assert not com.is_datetimelike_v_numeric(np.array([dt]), np.array([dt]))
assert com.is_datetimelike_v_numeric(1, dt)
assert com.is_datetimelike_v_numeric(1, dt)
assert com.is_datetimelike_v_numeric(np.array([dt]), 1)
assert com.is_datetimelike_v_numeric(np.array([1]), dt)
assert com.is_datetimelike_v_numeric(np.array([dt]), np.array([1]))
def test_is_datetimelike_v_object():
obj = object()
dt = np.datetime64(pd.datetime(2017, 1, 1))
assert not com.is_datetimelike_v_object(dt, dt)
assert not com.is_datetimelike_v_object(obj, obj)
assert not com.is_datetimelike_v_object(np.array([dt]), np.array([1]))
assert not com.is_datetimelike_v_object(np.array([dt]), np.array([dt]))
assert not com.is_datetimelike_v_object(np.array([obj]), np.array([obj]))
assert com.is_datetimelike_v_object(dt, obj)
assert com.is_datetimelike_v_object(obj, dt)
assert com.is_datetimelike_v_object(np.array([dt]), obj)
assert com.is_datetimelike_v_object(np.array([obj]), dt)
assert com.is_datetimelike_v_object(np.array([dt]), np.array([obj]))
def test_needs_i8_conversion():
assert not com.needs_i8_conversion(str)
assert not com.needs_i8_conversion(np.int64)
assert not com.needs_i8_conversion(pd.Series([1, 2]))
assert not com.needs_i8_conversion(np.array(['a', 'b']))
assert com.needs_i8_conversion(np.datetime64)
assert com.needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
assert com.needs_i8_conversion(pd.DatetimeIndex(
[1, 2, 3], tz="US/Eastern"))
def test_is_numeric_dtype():
assert not com.is_numeric_dtype(str)
assert not com.is_numeric_dtype(np.datetime64)
assert not com.is_numeric_dtype(np.timedelta64)
assert not com.is_numeric_dtype(np.array(['a', 'b']))
assert not com.is_numeric_dtype(np.array([], dtype=np.timedelta64))
assert com.is_numeric_dtype(int)
assert com.is_numeric_dtype(float)
assert com.is_numeric_dtype(np.uint64)
assert com.is_numeric_dtype(pd.Series([1, 2]))
assert com.is_numeric_dtype(pd.Index([1, 2.]))
def test_is_string_like_dtype():
assert not com.is_string_like_dtype(object)
assert not com.is_string_like_dtype(pd.Series([1, 2]))
assert com.is_string_like_dtype(str)
assert com.is_string_like_dtype(np.array(['a', 'b']))
def test_is_float_dtype():
assert not com.is_float_dtype(str)
assert not com.is_float_dtype(int)
assert not com.is_float_dtype(pd.Series([1, 2]))
assert not com.is_float_dtype(np.array(['a', 'b']))
assert com.is_float_dtype(float)
assert com.is_float_dtype(pd.Index([1, 2.]))
def test_is_bool_dtype():
assert not com.is_bool_dtype(int)
assert not com.is_bool_dtype(str)
assert not com.is_bool_dtype(pd.Series([1, 2]))
assert not com.is_bool_dtype(np.array(['a', 'b']))
assert com.is_bool_dtype(bool)
assert com.is_bool_dtype(np.bool)
assert com.is_bool_dtype(np.array([True, False]))
def test_is_extension_type():
assert not com.is_extension_type([1, 2, 3])
assert not com.is_extension_type(np.array([1, 2, 3]))
assert not com.is_extension_type(pd.DatetimeIndex([1, 2, 3]))
cat = pd.Categorical([1, 2, 3])
assert com.is_extension_type(cat)
assert com.is_extension_type(pd.Series(cat))
assert com.is_extension_type(pd.SparseArray([1, 2, 3]))
assert com.is_extension_type(pd.SparseSeries([1, 2, 3]))
assert com.is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
dtype = DatetimeTZDtype("ns", tz="US/Eastern")
s = pd.Series([], dtype=dtype)
assert com.is_extension_type(s)
# This test will only skip if the previous assertions
# pass AND scipy is not installed.
sparse = pytest.importorskip("scipy.sparse")
assert not com.is_extension_type(sparse.bsr_matrix([1, 2, 3]))
def test_is_complex_dtype():
assert not com.is_complex_dtype(int)
assert not com.is_complex_dtype(str)
assert not com.is_complex_dtype(pd.Series([1, 2]))
assert not com.is_complex_dtype(np.array(['a', 'b']))
assert com.is_complex_dtype(np.complex)
assert com.is_complex_dtype(np.array([1 + 1j, 5]))
| mit |
DGrady/pandas | pandas/tests/frame/test_reshape.py | 5 | 30682 | # -*- coding: utf-8 -*-
from __future__ import print_function
from warnings import catch_warnings
from datetime import datetime
import itertools
import pytest
from numpy.random import randn
from numpy import nan
import numpy as np
from pandas.compat import u
from pandas import (DataFrame, Index, Series, MultiIndex, date_range,
Timedelta, Period)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
with catch_warnings(record=True):
# pivot multiple columns
wp = tm.makePanel()
lp = wp.to_frame()
df = lp.reset_index()
tm.assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with tm.assert_raises_regex(ValueError, 'duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame({})
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
f = self.frame.copy()
f[:] = np.arange(np.prod(f.shape)).reshape(f.shape)
stacked = f.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, f)
assert_frame_equal(unstacked_df['bar'], f)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, f)
assert_frame_equal(unstacked_cols_df['bar'].T, f)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
# Fill with non-category results in NaN entries similar to above
result = data.unstack(fill_value='d')
assert_frame_equal(result, expected)
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
expected = DataFrame({'a': pd.Categorical(list('aca'),
categories=list('abc')),
'b': pd.Categorical(list('bcc'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
index=['a', 'b', 'c'],
some_categories=pd.Series(['a', 'b', 'c']
).astype('category'),
A=np.random.rand(3),
B=1,
C='foo',
D=pd.Timestamp('20010102'),
E=pd.Series([1.0, 50.0, 100.0]
).astype('float32'),
F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
G=False,
H=pd.Series([1, 200, 923442], dtype='int8')))
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(['state', 'index'])
unstack_and_compare(df1, 'index')
df1 = df.set_index(['state', 'some_categories'])
unstack_and_compare(df1, 'some_categories')
df1 = df.set_index(['F', 'C'])
unstack_and_compare(df1, 'F')
df1 = df.set_index(['G', 'B', 'state'])
unstack_and_compare(df1, 'B')
df1 = df.set_index(['E', 'A'])
unstack_and_compare(df1, 'E')
df1 = df.set_index(['state', 'index'])
s = df1['A']
unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
pytest.raises(ValueError, df2.stack, level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[[u('foo'), u('bar')], [u('one'), u('two')],
[u('a'), u('b')]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=[u('first'), u('second'), u('third')])
s = pd.Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
levels=[['foo', 'bar'], ['one', 'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['first', 'second'])
expected = pd.DataFrame(np.array([[np.nan, 0],
[0, np.nan],
[np.nan, 0],
[0, np.nan]],
dtype=np.float64),
index=expected_mi,
columns=pd.Index(['a', 'b'], name='third'))
assert_frame_equal(result, expected)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4],
[1, 2, 3, 4],
[2, 1, 3, 4],
[2, 2, 3, 4]]
df = DataFrame(rows, columns=list('ABCD'))
result = df.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(['A', 'B'])
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# mixed
df2 = df.set_index(['A', 'B'])
df2['C'] = 3.
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 2, 'float64': 2})
assert_series_equal(result, expected)
df2['D'] = 'foo'
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'float64': 2, 'object': 2})
assert_series_equal(result, expected)
# GH7405
for c, d in (np.zeros(5), np.zeros(5)), \
(np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):
df = DataFrame({'A': ['a'] * 5, 'C': c, 'D': d,
'B': pd.date_range('2012-01-01', periods=5)})
right = df.iloc[:3].copy(deep=True)
df = df.set_index(['A', 'B'])
df['D'] = df['D'].astype('int64')
left = df.iloc[:3].unstack(0)
right = right.set_index(['A', 'B']).unstack(0)
right[('D', 'a')] = right[('D', 'a')].astype('int64')
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')],
names=['c1', 'c1'])
df = DataFrame([1, 2], index=idx)
with pytest.raises(ValueError):
df.unstack('c1')
with pytest.raises(ValueError):
df.T.stack('c1')
def test_unstack_nan_index(self): # GH7466
cast = lambda val: '{0:1}'.format('' if val != val else val)
nan = np.nan
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split('.'))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(list(map(cast, right)))
assert left == right
df = DataFrame({'jim': ['a', 'b', nan, 'd'],
'joe': ['w', 'x', 'y', 'z'],
'jolie': ['a.w', 'b.x', ' .y', 'd.z']})
left = df.set_index(['jim', 'joe']).unstack()['jolie']
right = df.set_index(['joe', 'jim']).unstack()['jolie'].T
assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf['jolie'])
df = DataFrame({'1st': ['d'] * 3 + [nan] * 5 + ['a'] * 2 +
['c'] * 3 + ['e'] * 2 + ['b'] * 5,
'2nd': ['y'] * 2 + ['w'] * 3 + [nan] * 3 +
['z'] * 4 + [nan] * 3 + ['x'] * 3 + [nan] * 2,
'3rd': [67, 39, 53, 72, 57, 80, 31, 18, 11, 30, 59,
50, 62, 59, 76, 52, 14, 53, 60, 51]})
df['4th'], df['5th'] = \
df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \
df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)
for idx in itertools.permutations(['1st', '2nd', '3rd']):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ['4th', '5th']:
verify(udf[col])
# GH7403
df = pd.DataFrame(
{'A': list('aaaabbbb'), 'B': range(8), 'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, 0, 1, 2, nan, nan, nan, nan],
[nan, nan, nan, nan, 4, 5, 6, 7]]
vals = list(map(list, zip(*vals)))
idx = Index([nan, 0, 1, 2, 4, 5, 6, 7], name='B')
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[2, nan], [0, 4], [1, 5], [nan, 6], [3, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = pd.DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, nan], [0, 4], [1, 5], [2, 6], [nan, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH7401
df = pd.DataFrame({'A': list('aaaaabbbbb'), 'C': np.arange(10),
'B': (date_range('2012-01-01', periods=5)
.tolist() * 2)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack()
vals = np.array([[3, 0, 1, 2, nan, 4], [nan, 5, 6, 7, 8, 9]])
idx = Index(['a', 'b'], name='A')
cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH4862
vals = [['Hg', nan, nan, 680585148],
['U', 0.0, nan, 680585148],
['Pb', 7.07e-06, nan, 680585148],
['Sn', 2.3614e-05, 0.0133, 680607017],
['Ag', 0.0, 0.0133, 680607017],
['Hg', -0.00015, 0.0133, 680607017]]
df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'],
index=[17263, 17264, 17265, 17266, 17267, 17268])
left = df.copy().set_index(['s_id', 'dosage', 'agent']).unstack()
vals = [[nan, nan, 7.07e-06, nan, 0.0],
[0.0, -0.00015, nan, 2.3614e-05, nan]]
idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]],
labels=[[0, 1], [-1, 0]],
names=['s_id', 'dosage'])
cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']],
labels=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, 'agent'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(['s_id', 'dosage', 'agent'])
assert_frame_equal(left.unstack(), right)
# GH9497 - multiple unstack with nulls
df = DataFrame({'1st': [1, 2, 1, 2, 1, 2],
'2nd': pd.date_range('2014-02-01', periods=6,
freq='D'),
'jim': 100 + np.arange(6),
'joe': (np.random.randn(6) * 10).round(2)})
df['3rd'] = df['2nd'] - pd.Timestamp('2014-02-02')
df.loc[1, '2nd'] = df.loc[3, '2nd'] = nan
df.loc[1, '3rd'] = df.loc[4, '3rd'] = nan
left = df.set_index(['1st', '2nd', '3rd']).unstack(['2nd', '3rd'])
assert left.notna().values.sum() == 2 * len(df)
for col in ['jim', 'joe']:
for _, r in df.iterrows():
key = r['1st'], (col, r['2nd'], r['3rd'])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame(
[1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])
ecols = MultiIndex.from_tuples([(t, 'A')])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(np.arange(3 * len(multiindex))
.reshape(3, len(multiindex)),
columns=multiindex)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(df.columns.get_values(),
names=df.columns.names)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),
('A', 'y'),
('C', 'x'), ('C', 'u')],
names=['Upper', 'Lower'])
for multiindex_columns in ([0, 1, 2, 3, 4],
[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2], [1, 2, 3], [2, 3, 4],
[0, 1], [0, 2], [0, 3],
[0], [2], [4]):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(
full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3),
columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]],
index=MultiIndex(
levels=[[0, 1], ['u', 'x', 'y', 'z']],
labels=[[0, 0, 1, 1],
[1, 3, 1, 3]],
names=[None, 'Lower']),
columns=Index(['B', 'C'], name='Upper'),
dtype=df.dtypes[0])
assert_frame_equal(result, expected)
def test_stack_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
for labels in [list("yxz"), list("yxy")]:
cidx = pd.CategoricalIndex(labels, categories=list("xyz"),
ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
                # `MultiIndex.from_product` preserves categorical dtype -
                # it's tested elsewhere.
midx = pd.MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
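# Editor's note (sketch, not part of the original test suite): the invariant
# running through this module is that stack and unstack are inverses on
# well-formed frames -- test_stack_unstack checks that df.stack().unstack()
# recovers df directly -- with the remaining tests covering the edge cases
# (fill_value, NaN labels, categorical/datetime/period dtypes) where that
# round trip needs extra care.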
| bsd-3-clause |
chunweiyuan/xarray | xarray/core/nanops.py | 1 | 6781 | import numpy as np
from . import dtypes, nputils
from .duck_array_ops import (
_dask_or_eager_func, count, fillna, isnull, where_method)
from .pycompat import dask_array_type
try:
import dask.array as dask_array
except ImportError:
dask_array = None
def _replace_nan(a, val):
"""
replace nan in a by val, and returns the replaced array and the nan
position
"""
mask = isnull(a)
return where_method(val, mask, a), mask
def _maybe_null_out(result, axis, mask, min_count=1):
"""
xarray version of pandas.core.nanops._maybe_null_out
"""
if hasattr(axis, '__len__'): # if tuple or list
raise ValueError('min_count is not available for reduction '
'with more than one dimensions.')
if axis is not None and getattr(result, 'ndim', False):
null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0
if null_mask.any():
dtype, fill_value = dtypes.maybe_promote(result.dtype)
result = result.astype(dtype)
result[null_mask] = fill_value
elif getattr(result, 'dtype', None) not in dtypes.NAT_TYPES:
null_mask = mask.size - mask.sum()
if null_mask < min_count:
result = np.nan
return result
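# Editor's note (sketch): _maybe_null_out masks a reduction wherever the
# number of valid (non-NaN) elements falls below ``min_count``.  For example,
# reducing [[1, nan], [nan, nan]] along axis=1 with min_count=1 keeps row 0
# (one valid value) but replaces row 1 (zero valid values) with the promoted
# fill value, while a scalar reduction with too few valid values becomes NaN
# outright.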
def _nan_argminmax_object(func, fill_value, value, axis=None, **kwargs):
""" In house nanargmin, nanargmax for object arrays. Always return integer
type
"""
valid_count = count(value, axis=axis)
value = fillna(value, fill_value)
data = _dask_or_eager_func(func)(value, axis=axis, **kwargs)
# TODO This will evaluate dask arrays and might be costly.
if (valid_count == 0).any():
raise ValueError('All-NaN slice encountered')
return data
def _nan_minmax_object(func, fill_value, value, axis=None, **kwargs):
""" In house nanmin and nanmax for object array """
valid_count = count(value, axis=axis)
filled_value = fillna(value, fill_value)
data = getattr(np, func)(filled_value, axis=axis, **kwargs)
if not hasattr(data, 'dtype'): # scalar case
data = dtypes.fill_value(value.dtype) if valid_count == 0 else data
return np.array(data, dtype=value.dtype)
return where_method(data, valid_count != 0)
def nanmin(a, axis=None, out=None):
if a.dtype.kind == 'O':
return _nan_minmax_object(
'min', dtypes.get_pos_infinity(a.dtype), a, axis)
module = dask_array if isinstance(a, dask_array_type) else nputils
return module.nanmin(a, axis=axis)
def nanmax(a, axis=None, out=None):
if a.dtype.kind == 'O':
return _nan_minmax_object(
'max', dtypes.get_neg_infinity(a.dtype), a, axis)
module = dask_array if isinstance(a, dask_array_type) else nputils
return module.nanmax(a, axis=axis)
def nanargmin(a, axis=None):
fill_value = dtypes.get_pos_infinity(a.dtype)
if a.dtype.kind == 'O':
return _nan_argminmax_object('argmin', fill_value, a, axis=axis)
a, mask = _replace_nan(a, fill_value)
if isinstance(a, dask_array_type):
res = dask_array.argmin(a, axis=axis)
else:
res = np.argmin(a, axis=axis)
if mask is not None:
mask = mask.all(axis=axis)
if mask.any():
raise ValueError("All-NaN slice encountered")
return res
def nanargmax(a, axis=None):
fill_value = dtypes.get_neg_infinity(a.dtype)
if a.dtype.kind == 'O':
return _nan_argminmax_object('argmax', fill_value, a, axis=axis)
a, mask = _replace_nan(a, fill_value)
if isinstance(a, dask_array_type):
res = dask_array.argmax(a, axis=axis)
else:
res = np.argmax(a, axis=axis)
if mask is not None:
mask = mask.all(axis=axis)
if mask.any():
raise ValueError("All-NaN slice encountered")
return res
def nansum(a, axis=None, dtype=None, out=None, min_count=None):
a, mask = _replace_nan(a, 0)
result = _dask_or_eager_func('sum')(a, axis=axis, dtype=dtype)
if min_count is not None:
return _maybe_null_out(result, axis, mask, min_count)
else:
return result
def _nanmean_ddof_object(ddof, value, axis=None, **kwargs):
""" In house nanmean. ddof argument will be used in _nanvar method """
from .duck_array_ops import (count, fillna, _dask_or_eager_func,
where_method)
valid_count = count(value, axis=axis)
value = fillna(value, 0)
# As dtype inference is impossible for object dtype, we assume float
# https://github.com/dask/dask/issues/3162
dtype = kwargs.pop('dtype', None)
if dtype is None and value.dtype.kind == 'O':
dtype = value.dtype if value.dtype.kind in ['cf'] else float
data = _dask_or_eager_func('sum')(value, axis=axis, dtype=dtype, **kwargs)
data = data / (valid_count - ddof)
return where_method(data, valid_count != 0)
def nanmean(a, axis=None, dtype=None, out=None):
if a.dtype.kind == 'O':
return _nanmean_ddof_object(0, a, axis=axis, dtype=dtype)
if isinstance(a, dask_array_type):
return dask_array.nanmean(a, axis=axis, dtype=dtype)
return np.nanmean(a, axis=axis, dtype=dtype)
def nanmedian(a, axis=None, out=None):
return _dask_or_eager_func('nanmedian', eager_module=nputils)(a, axis=axis)
def _nanvar_object(value, axis=None, **kwargs):
ddof = kwargs.pop('ddof', 0)
kwargs_mean = kwargs.copy()
kwargs_mean.pop('keepdims', None)
value_mean = _nanmean_ddof_object(ddof=0, value=value, axis=axis,
keepdims=True, **kwargs_mean)
squared = (value.astype(value_mean.dtype) - value_mean)**2
return _nanmean_ddof_object(ddof, squared, axis=axis, **kwargs)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0):
if a.dtype.kind == 'O':
return _nanvar_object(a, axis=axis, dtype=dtype, ddof=ddof)
return _dask_or_eager_func('nanvar', eager_module=nputils)(
a, axis=axis, dtype=dtype, ddof=ddof)
def nanstd(a, axis=None, dtype=None, out=None, ddof=0):
return _dask_or_eager_func('nanstd', eager_module=nputils)(
a, axis=axis, dtype=dtype, ddof=ddof)
def nanprod(a, axis=None, dtype=None, out=None, min_count=None):
a, mask = _replace_nan(a, 1)
result = _dask_or_eager_func('nanprod')(a, axis=axis, dtype=dtype, out=out)
if min_count is not None:
return _maybe_null_out(result, axis, mask, min_count)
else:
return result
def nancumsum(a, axis=None, dtype=None, out=None):
return _dask_or_eager_func('nancumsum', eager_module=nputils)(
a, axis=axis, dtype=dtype)
def nancumprod(a, axis=None, dtype=None, out=None):
return _dask_or_eager_func('nancumprod', eager_module=nputils)(
a, axis=axis, dtype=dtype)
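# Editor's sketch (not part of the original xarray module): a tiny smoke test
# that can be run with ``python -m xarray.core.nanops`` (the relative imports
# above prevent running this file directly as a script).  The expected values
# follow from the definitions above.
if __name__ == "__main__":  # pragma: no cover
    _a = np.array([1.0, np.nan, 3.0])
    print(nansum(_a))                # 4.0 -- NaN treated as 0
    print(nanmean(_a))               # 2.0 -- mean over the two valid values
    print(nansum(_a, min_count=3))   # nan -- fewer than 3 valid values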
| apache-2.0 |
mblondel/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
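# Editor's note (sketch): the two plot_figs calls render the same point cloud
# from two viewpoints -- one shows the cloud nearly edge-on, so the flat third
# direction is apparent, while the other largely hides it.  Because c is
# almost a linear combination of a and b (z has scale 0.1),
# pca.explained_variance_ratio_ computed inside plot_figs has one component
# much smaller than the other two, which is what the fitted plane visualises.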
| bsd-3-clause |
krdean/MedPlagSIRs | Bubonic_plague_rat_host_and_flea_vector.py | 1 | 21169 | #!/usr/bin/env python
import numpy as np
import matplotlib
matplotlib.use('SVG')
import matplotlib.pyplot as plt
import json
import pdb
import argparse
import logging
class Rats():
def __init__(self, S, I, R, D, roaming):
self.nu = ((4.57**(1/365.0))-1.0)
self.mu = 0.00068
self.carrying_capacity = 52.0
self.beta = .0641
self.gamma = 1/5.15
self.inherited_resistance = 0.5
self.chance_of_recovery = 0.10
self.roaming = roaming
self.not_roaming = 1.0 - self.roaming
self.S = S
self.I = I
self.R = R
self.D = D
self.N = self.S + self.I + self.R
self.SIR = [self.S, self.I, self.R, self.D]
class Fleas():
def __init__(self, H, S, I, carrying_capacity_per_rat):
self.nu = ((21**(1/365.0))-1.0)
self.mu = 1/5.0 #Off host death rate
self.carrying_capacity = None
self.carrying_capacity_per_rat = carrying_capacity_per_rat
self.searching_efficiency = 0.038
self.H = H #Average number of fleas on host
        self.I = I #Free infectious fleas
self.SIR = [self.H, self.I]
class Humans():
def __init__(self, S, I, R, D):
self.nu = ((1.04**(1/365.0))-1.0)
self.mu = (1.0-(0.96**(1/365.0)))
self.beta = 0.0641
self.gamma = 1/26.0
self.chance_of_recovery = 0.33
self.last_exposed = 0
self.S = S
self.I = I
self.R = R
self.D = D
self.N = self.S + self.I + self.R
self.SIR = [self.S, self.I, self.R, self.D]
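# Editor's note (interpretive sketch): the rate constants above appear to be
# daily rates derived from annual figures, e.g. Rats.nu = (4.57**(1/365)) - 1
# converts a 4.57-fold annual increase into a per-day growth rate,
# Humans.mu = 1 - 0.96**(1/365) converts a 96% annual survival probability
# into a per-day death probability, and gamma is the reciprocal of the mean
# infectious period in days (5.15 for rats, 26 for humans).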
class Metapop():
def __init__(self, roaming, carrying_capacity_per_rat):
"Metapopulation variables"
self.infectious_rat_deaths = None
self.neighbors = None
self.force_to_humans = 0
self.force_to_rats = 0
self.neighbor_force_to_rats = 0
self.roaming = roaming
self.carrying_capacity_per_rat = carrying_capacity_per_rat
def _add_host_vector_objects(self):
"Defines healthy rat, flea, human objects"
self.r = Rats(S = 52.0, I = 0.0, R = 0.0, D = 0.0, roaming = self.roaming)
self.f = Fleas(H = self.carrying_capacity_per_rat, S = 0.0, I = 0.0, carrying_capacity_per_rat = self.carrying_capacity_per_rat)
self.h = Humans(S = 52.0, I = 0.0, R = 0.0, D = 0.0)
def _add_infected_host_vector_objects(self):
"Defines infected rat, flea, human objects"
self.r = Rats(S = 51.0, I =1.0, R = 0.0, D = 0.0, roaming = self.roaming)
self.f = Fleas(H = self.carrying_capacity_per_rat, S = 0.0, I = self.carrying_capacity_per_rat, carrying_capacity_per_rat = self.carrying_capacity_per_rat)
self.h = Humans(S = 52.0, I = 0.0, R = 0.0, D = 0.0)
def _rat_equations_inside(self):
"Rat dyniamics inside of a cell"
mu_S = 0
mu_I = 0
mu_R = 0
#Natural deaths
if self.r.S > 0:
mu_S = min (self.r.S, s.distribution((self.r.mu*self.r.S),1))
if self.r.I > 0:
mu_I = min (self.r.I, s.distribution((self.r.mu*self.r.I),1))
if self.r.R > 0:
mu_R = min (self.r.R, s.distribution((self.r.mu*self.r.R),1))
S_births = 0
nu_R = 0
nu_R_p = 0
born_R = 0
S_R_births = 0
new_I = 0
new_RD = 0
new_R = 0
new_D = 0
self.infectious_rat_deaths = 0
self.noninfectious_rat_deaths = 0
if self.r.S > 0 and (self.r.N/self.r.carrying_capacity) < 1:
S_births = s.distribution((self.r.nu*self.r.S*(1-(self.r.N/self.r.carrying_capacity))),1)
if self.r.R > 0 and (self.r.N/self.r.carrying_capacity) < 1:
nu_R = s.distribution((self.r.nu*self.r.R*(1-(self.r.N/self.r.carrying_capacity))),1)
if nu_R > 0:
born_R = min(nu_R, s.distribution((nu_R*self.r.inherited_resistance),1))
S_R_births = nu_R - born_R
if self.r.S > 0 and self.force_to_rats > 0:
new_I = min (self.r.S, s.distribution((self.r.not_roaming*self.r.beta*(self.r.S/self.r.N)*self.force_to_rats),1))
if self.r.I > 0:
new_RD = min (self.r.I, s.distribution((self.r.gamma*self.r.I),1))
new_R = s.distribution((self.r.chance_of_recovery*new_RD),1)
new_D = new_RD - new_R
dS = S_births + S_R_births - new_I - mu_S
dI = new_I - new_RD - mu_I
dR = born_R + new_R - mu_R
dD = new_D + mu_S + mu_I + mu_R
if (new_D + mu_I) > 0:
self.infectious_rat_deaths = new_D + mu_I
self.r.S = self.r.S + dS
self.r.I = self.r.I + dI
self.r.R = self.r.R + dR
self.r.D = self.r.D + dD
self.r.SIR = [self.r.S, self.r.I, self.r.R, self.r.D]
self.r.N = self.r.S + self.r.I + self.r.R
self.f.carrying_capacity = self.r.N*self.f.carrying_capacity_per_rat
def _flea_equations_inside(self):
"Flea dynamics inside of a cell"
flea_growth = 0
new_I = 0
mu_I = 0
self.force_to_humans = 0
self.force_to_rats = 0
#Growth and decay
if self.f.H > 0 and self.f.carrying_capacity > 0 and ((self.f.H*self.r.N)/self.f.carrying_capacity) < 1:
flea_growth = s.distribution((self.f.nu*self.f.H*self.r.N*(1-(self.f.H*self.r.N)/self.f.carrying_capacity)),1)
elif self.f.H > 0 and self.f.carrying_capacity > 0 and ((self.f.H*self.r.N)/self.f.carrying_capacity) > 1:
flea_growth = -s.distribution((self.f.nu*self.f.H*self.r.N*abs(1-(self.f.H*self.r.N/self.f.carrying_capacity))),1)
#Starvation
if self.f.I > 0:
mu_I = min (self.f.I, s.distribution((self.f.mu*self.f.I),1)) #Free infectious fleas dying of starvation
#New free fleas
if self.infectious_rat_deaths > 0 and self.f.H > 0:
new_I = int(self.infectious_rat_deaths*self.f.H)
#Force of infection to humans
if self.f.I > 0 and self.r.N > 0:
self.force_to_humans = min(self.f.I, s.distribution(self.f.I*np.exp(-self.f.searching_efficiency*self.r.N),1))
#Force of infection to rats
self.force_to_rats = self.f.I-self.force_to_humans
total_H_changes = flea_growth + self.force_to_rats
if self.r.N > 0:
avg_H_changes = total_H_changes/self.r.N
else:
avg_H_changes = 0
dH = avg_H_changes
dI = new_I - mu_I
self.f.H = self.f.H + dH #Average number of fleas on rats
self.f.I = self.f.I + dI #Free infectious fleas
self.f.SIR = [self.f.H, self.f.I]
def _human_equations_inside(self):
"Human dynamics inside of a cell"
mu_S = 0
mu_I = 0
mu_R = 0
#Natural Deaths
if self.h.S > 0:
mu_S = min (self.h.S, s.distribution((self.h.mu*self.h.S),1))
if self.h.I > 0:
mu_I = min (self.h.I, s.distribution((self.h.mu*self.h.I),1))
if self.h.R > 0:
mu_R = min (self.h.R, s.distribution((self.h.mu*self.h.R),1))
S_births = 0
R_births = 0
new_I = 0
new_RD = 0
new_R = 0
new_D = 0
if self.h.S > 0:
S_births = s.distribution((self.h.nu*self.h.S),1)
if self.h.R > 0:
R_births = s.distribution((self.h.nu*self.h.R),1)
if self.h.S > 0 and self.force_to_humans > 0:
new_I = min (self.h.S, s.distribution((self.h.beta*(self.h.S/self.h.N)*self.force_to_humans),1))
if self.h.I > 0:
new_RD = min (self.h.I, s.distribution((self.h.gamma*self.h.I),1))
new_R = min(new_RD, s.distribution((self.h.chance_of_recovery*new_RD),1))
new_D = new_RD - new_R
dS = S_births + R_births - new_I - mu_S
dI = new_I - new_RD - mu_I
dR = new_R - mu_R
dD = new_D + mu_S + mu_I + mu_R
self.h.S = self.h.S + dS
self.h.I = self.h.I + dI
self.h.R = self.h.R + dR
self.h.D = self.h.D + dD
self.h.SIR = [self.h.S, self.h.I, self.h.R, self.h.D]
self.h.N = self.h.S + self.h.I + self.h.R
self.h.last_exposed = int(new_I)
def _dynamics_between_metapop(self):
"Equations for rats exposed from neighboring cells"
new_I = 0
if self.N_neighbors_rats > 0 and self.neighbor_force_to_rats > 0 and self.r.S > 0:
new_I = min (self.r.S, s.distribution((self.r.roaming*self.r.beta*(self.r.S/self.N_neighbors_rats)*self.neighbor_force_to_rats),1))
dS = - new_I
dI = new_I
self.r.S = self.r.S+dS
self.r.I = self.r.I+dI
self.r.SIR = [self.r.S, self.r.I, self.r.R, self.r.D]
self.r.N = self.r.S + self.r.I + self.r.R
self.f.carrying_capacity = self.r.N*self.f.carrying_capacity_per_rat
class Community():
def __init__(self, end_time, length, width, roaming, carrying_capacity_per_rat):
"Community variables"
#General
self.infected_metapops = 1
self.time_step = 1.0
self.end_time = end_time
self.length = length
self.width = width
self.size = self.length*self.width
self.community = list(None for i in xrange(self.size))
self.roaming = roaming
self.carrying_capacity_per_rat = carrying_capacity_per_rat
#Human
self.metapop_human_SIR = list(None for i in xrange(self.size))
self.epidemic_solution_human = None
self.initial_size = self.size * 52.0
self.last_exposed_total = 0
#Rat
self.metapop_rat_SIR = list(None for i in xrange(self.size))
self.epidemic_solution_rats = None
#Flea
self.metapop_flea_SIR = list(None for i in xrange(self.size))
self.epidemic_solution_flea = None
#Epidemic
self.epidemic_duration = 0
self.distance = None
self.human_epidemic_duration = 0
def _add_metapops(self):
"Adds metapop objects to the community variable"
for i in xrange(len(self.community)):
self.community[i] = Metapop(roaming = self.roaming, carrying_capacity_per_rat = self.carrying_capacity_per_rat)
self.community[i]._add_host_vector_objects()
def _add_plague(self):
"Adds infected metapop obects to the community variable"
if self.infected_metapops == 0:
pass
else:
for x in xrange(self.infected_metapops):
                i = np.random.randint(len(self.community))
self.community[i]._add_infected_host_vector_objects()
def _SIR_numbers(self):
"Adds SIR numbers for vector and host objects to get community totals"
for i in xrange(len(self.metapop_rat_SIR)):
self.metapop_rat_SIR[i] = self.community[i].r.SIR
self.community_rat_totals = list(sum(col) for col in zip(*self.metapop_rat_SIR))
for i in xrange(len(self.metapop_flea_SIR)):
self.metapop_flea_SIR[i] = self.community[i].f.SIR
self.community_flea_totals = list(sum(col) for col in zip(*self.metapop_flea_SIR))
for i in xrange(len(self.metapop_human_SIR)):
self.metapop_human_SIR[i] = self.community[i].h.SIR
self.community_human_totals = list(sum(col) for col in zip(*self.metapop_human_SIR))
def _last_exposure(self):
self.last_exposed_total = 0
for i in xrange(self.size):
self.last_exposed_total = self.last_exposed_total + self.community[i].h.last_exposed
def _neighbors(self):
"Identifies neighboring cells"
for i in xrange(len(self.community)):
if i == 0: #TOP LEFT
self.community[i].neighbors = [i+1, i+self.width]
elif 1 <= i <= (self.width-2): #FIRST ROW
self.community[i].neighbors = [i-1, i+1, i+self.width]
elif i == (self.width-1): #TOP RIGHT
self.community[i].neighbors = [i-1, i+self.width]
elif i == ((self.width*self.length)-1): #BOTTOM RIGHT
self.community[i].neighbors = [i-1, i-self.width]
elif ((self.width*self.length)-(self.width-1)) <= i <= ((self.width*self.length)-1): #LAST ROW
self.community[i].neighbors = [i-1, i+1, i-self.width]
elif i == ((self.width*self.length)-(self.width)): #BOTTOM LEFT
self.community[i].neighbors = [i+1, i-self.width]
elif i%(self.width) == 0: #LEFT EDGE
self.community[i].neighbors = [i+1, i+self.width, i-self.width]
elif (i-(self.width-1))%(self.width) == 0: #RIGHT EDGE
self.community[i].neighbors = [i-1, i+self.width, i-self.width]
else: #CENTER
self.community[i].neighbors = [i-1, i-self.width, i+1, i+self.width]
def _calc_neighbors(self):
"Calculates the number of infectious people is nearby cells"
for i in xrange(len(self.community)):
self.community[i].neighbor_force_to_rats = sum(self.community[x].force_to_rats for x in self.community[i].neighbors)
self.community[i].N_neighbors_rats = sum(self.community[x].r.N for x in self.community[i].neighbors)
def _update_community(self):
"Updates SIR numbers for calculations"
map(lambda x:x._rat_equations_inside(), self.community), self._SIR_numbers()
map(lambda x:x._flea_equations_inside(), self.community), self._SIR_numbers()
map(lambda x:x._human_equations_inside(), self.community), self._SIR_numbers()
self._calc_neighbors(), map(lambda x:x._dynamics_between_metapop(), self.community), self._SIR_numbers(), self._last_exposure()
def epidemic(self):
"Creates a community of metapopulation objects"
self._add_metapops(), self._add_plague(), self._SIR_numbers(), self._neighbors()
self.epidemic_solution_rat = [self.community_rat_totals]
self.epidemic_solution_flea = [self.community_flea_totals]
self.epidemic_solution_human = [self.community_human_totals]
t = np.linspace(0.0, self.end_time, (self.end_time+1/self.time_step))
for x in t:
self._update_community()
self.epidemic_solution_rat.append(self.community_rat_totals)
self.epidemic_solution_flea.append(self.community_flea_totals)
self.epidemic_solution_human.append(self.community_human_totals)
if self.last_exposed_total > 0:
self.epidemic_duration = int(x)
if (x-2*26.0) >= self.epidemic_duration and self.infected_metapops > 0: #26 avg latent + infectious period
break
human_solution_array = np.array(self.epidemic_solution_human)
I_human_list = human_solution_array[:,1]
first_exposed = np.argmax(I_human_list>0)
if self.epidemic_duration > 0:
self.human_epidemic_duration = self.epidemic_duration - (first_exposed-1)
def pointDist(self, x0, y0):
if y0 > 0:
            x0 = x0/1000.0  # population size (thousands)
            y0 = y0/30.0  # duration (months)
            y = 3.031 + (.132*x0)  # Olea regression line: predicted duration (months) for this population size
pointDist=abs(y-y0) #distance
self.distance = pointDist
else:
self.distance = None
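    # Editor's note (interpretive sketch): pointDist measures how far this run
    # sits from an empirical regression of epidemic duration on settlement
    # size (the Olea line above).  x0 arrives as the initial human population
    # and is rescaled to thousands, y0 arrives as the human epidemic duration
    # in days and is rescaled to months, and the stored distance is the
    # vertical gap between the simulated duration and the regression
    # prediction; a smaller distance means the simulated outbreak is closer to
    # the historically observed duration for a town of that size.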
def output_results(self):
self.deaths = int(self.epidemic_solution_human[self.epidemic_duration][3])
self.pointDist(self.initial_size, self.human_epidemic_duration)
data = [int(self.initial_size), int(self.epidemic_duration), int(self.human_epidemic_duration), self.distance, int(self.deaths), int(self.deaths/self.initial_size*100), self.roaming, self.carrying_capacity_per_rat]
jsondata = json.dumps(data)
if self.distance > 0.0:
s.output.write(jsondata)
s.outputraw.write(jsondata)
print data
def graph(self):
self._graph_human()
self._graph_rat()
self._graph_flea()
def _graph_human(self):
"Exports graph of solution to svg file"
        #Split the solution history into per-class arrays for plotting
epidemic_solution_array_human = np.array(self.epidemic_solution_human)
human_S_class = epidemic_solution_array_human[:,0]
human_I_class = epidemic_solution_array_human[:,1]
human_R_class = epidemic_solution_array_human[:,2]
human_D_class = epidemic_solution_array_human[:,3]
        #Exports graph for solution to svg file
plt.subplot(4,1,1)
S_line = plt.plot(human_S_class, label='Susceptible', linewidth=2, color='g')
I_line =plt.plot(human_I_class, label='Infectious', linewidth=2, color='m')
R_line = plt.plot(human_R_class, label='Recovered', linewidth=2, color='b')
D_line = plt.plot(human_D_class, label='Dead', linewidth=2, color='r')
plt.legend(loc=1)
plt.xlabel('Days')
plt.ylabel('Humans')
plt.title('Bubonic with Rats and Fleas without infection')
plt.grid()
plt.savefig('rat_model_no_plague.svg')
def _graph_rat(self):
        #Split the solution history into per-class arrays for plotting
epidemic_solution_array_rat = np.array(self.epidemic_solution_rat)
rat_S_class = epidemic_solution_array_rat[:,0]
rat_I_class = epidemic_solution_array_rat[:,1]
rat_R_class = epidemic_solution_array_rat[:,2]
rat_D_class = epidemic_solution_array_rat[:,3]
        #Exports graph for solution to svg file
plt.subplot(4,1,2)
rat_S_line = plt.plot(rat_S_class, label='Susceptible', linewidth=2, color='g')
rat_I_line =plt.plot(rat_I_class, label='Infectious', linewidth=2, color='m')
rat_R_line = plt.plot(rat_R_class, label='Recovered', linewidth=2, color='b')
rat_D_line = plt.plot(rat_D_class, label='Dead', linewidth=2, color='r')
plt.legend(loc=1)
plt.xlabel('Days')
plt.ylabel('Rats')
plt.grid()
plt.savefig('rat_model_no_plague.svg')
def _graph_flea(self):
epidemic_solution_array_flea = np.array(self.epidemic_solution_flea)
flea_average = epidemic_solution_array_flea[:,0]
flea_I_class = epidemic_solution_array_flea[:,1]
        #Exports graph for solution to svg file
plt.subplot(4,1,3)
flea_I_line = plt.plot(flea_I_class, label='Free infectious', linewidth=2, color='y')
plt.legend(loc=1)
plt.xlabel('Days')
plt.ylabel('Fleas')
plt.grid()
plt.savefig('rat_model_no_plague.svg')
plt.subplot(4,1,4)
flea_average = plt.plot(flea_average/self.size, label='Average per rat', linewidth=2, color='b')
plt.legend(loc=1)
plt.xlabel('Days')
plt.ylabel('Flea index')
plt.grid()
plt.savefig('rat_model_no_plague.svg')
class Simulator():
def __init__(self, size_range_min, size_range_max, repeat, randavg):
self.size_range_min = size_range_min
self.size_range_max = size_range_max
self.repeat = repeat
self.simulation_results = []
self.randavg = randavg #enable or disable random avg
self.output = open("rat_simulation_outputtest.txt", 'w')
self.outputraw = open("rat_simulation_outputtestraw.txt", 'w')
self.distance_solution = []
def distribution(self, lam, size):
log.debug(' lam %f' % lam)
log.debug(' size %i' % size)
if not s.randavg:
return np.random.poisson(lam, size)[0]
else:
return lam
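    # Editor's note (sketch): each demographic/epidemiological rate `lam` is
    # realised per time step as a Poisson draw (size=1, so the single sample
    # is returned); passing --norandom makes this method return `lam` itself,
    # turning the stochastic model into its deterministic mean-field
    # counterpart and making runs reproducible.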
def simulation(self):
m = 0.3
K_f = 4.0
for i in range(self.size_range_min, self.size_range_max+1):
r = 0
while r < self.repeat:
c = Community(width = i, length = i, end_time = 10000.0, roaming = m, carrying_capacity_per_rat = K_f)
c.epidemic()
c.output_results()
if c.distance > 0.0:
self.distance_solution.append(c.distance)
r = r+1
c.graph()
#print np.mean(self.distance_solution)
self.output.close()
s.outputraw.close()
if __name__ == "__main__":
# Parser bits
parser = argparse.ArgumentParser(description='Rat_flea_human Simulator')
parser.add_argument('--debug', dest='debug', action='store_true',
default=False, help='enable debug mode')
parser.add_argument('--norandom', dest='norand', action='store_true',
default=False, help='disables randomness')
args = parser.parse_args()
#Build logging
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('Rat_flea_human')
log.debug(args.norand)
#Run the simulations
size_range_min = 10
size_range_max = 10
repeat = 1
s = Simulator(size_range_min, size_range_max, repeat, args.norand)
s.simulation()
| gpl-3.0 |
grain2011/vislab | vislab/_results.py | 4 | 5575 | import os
import pandas as pd
import cPickle
import numpy as np
import vislab
def load_pred_results(collection_name, cache_dirname, multiclass=False, force=False):
"""
Return DataFrame of prediction experiment results and Panel of per-image
predictions.
"""
if not os.path.exists(cache_dirname):
vislab.util.makedirs(cache_dirname)
results_df_filename = os.path.join(
cache_dirname, '{}_results_df.pickle'.format(collection_name))
preds_panel_filename = os.path.join(
cache_dirname, '{}_preds_panel.pickle'.format(collection_name))
# If cache exists, load and return.
if (os.path.exists(results_df_filename) and
os.path.exists(preds_panel_filename) and
not force):
results_df = pd.read_pickle(results_df_filename)
preds_panel = pd.read_pickle(preds_panel_filename)
print("Loaded from cache: {} records".format(results_df.shape[0]))
return results_df, preds_panel
# Otherwise, construct from database.
client = vislab.util.get_mongodb_client()
collection = client['predict'][collection_name]
print("Results in collection {}: {}".format(collection_name, collection.count()))
df = pd.DataFrame(list(collection.find()))
df.index = df.index.astype(str)
# Make the features list hashable for filtering/joins.
df['features_str'] = df['features'].apply(lambda x: ','.join(sorted(x)))
# We need a unique representation of the predictor settings.
df['setting'] = df.apply(lambda x: '{} {} {}'.format(x['features_str'], x['quadratic'], 'vw'), axis=1)
# And of the task performed.
df['full_task'] = df.apply(lambda x: '{} {}'.format(x['task'], x['data']), axis=1)
df = df.drop_duplicates(cols=['features_str', 'full_task'], take_last=True)
# Just for printing, if needed.
df = df.sort(['full_task', 'setting'])
# Get all predictions in a separate panel and drop the pickled ones.
if multiclass:
data = {}
for setting in df['setting'].unique():
el = df[df['setting'] == setting].iloc[0]
try:
pred_df = cPickle.loads(el['pred_df'])
except:
assert('results_name' in el)
pred_df_filename = '{}/{}.h5'.format(
vislab.config['paths']['results'], el['results_name'])
#print(pred_df_filename)
pred_df = pd.read_hdf(pred_df_filename, 'df')
# Not sure why there should ever be duplicate indices, but
# there are for one of the wikipaintings results...
pred_df['__index'] = pred_df.index
pred_df.drop_duplicates(cols='__index', take_last=True, inplace=True)
del pred_df['__index']
data[setting] = pred_df
preds_panel = pd.Panel(data).swapaxes('items', 'minor')
else:
preds_panel = get_all_preds_panel(df)
try:
del df['pred_df']
except KeyError:
pass
df.to_pickle(results_df_filename)
preds_panel.to_pickle(preds_panel_filename)
return df, preds_panel
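# Editor's note (illustrative sketch, not part of the original module): a
# typical call mirrors the __main__ block at the bottom of this file and
# assumes a populated 'predict' MongoDB collection plus a writable cache
# directory, e.g.
#
#     results_df, preds_panel = load_pred_results(
#         'wikipaintings_oct25', '/tmp/vislab_cache', multiclass=True)
#
# results_df then holds one row per (task, feature-setting) experiment and
# preds_panel holds the per-image predictions keyed by setting.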
def get_all_preds_panel(df):
all_full_tasks = df['full_task'].unique()
data = dict((
(full_task, get_all_preds_df(df, full_task))
for full_task in all_full_tasks
))
all_preds_panel = pd.Panel(data)
return all_preds_panel
def get_all_preds_df(df, full_task):
"""
Get the DataFrame of predictions from the results dataframe.
Tip: get all predictions of an image with
all_preds_panel.major_xs('f_1604904579').T
"""
tdf = df[df['full_task'] == full_task]
# Make sure that there are no duplicate settings.
if len(tdf.setting.unique()) != tdf.shape[0]:
try:
del df['pred_df']
except KeyError:
pass
print(tdf.to_string())
raise Exception("Non-unique feature-setting pairs")
pred_dfs = []
for i, row in tdf.iterrows():
try:
pred_df = cPickle.loads(row['pred_df'])
except:
assert('results_name' in row)
pred_df_filename = '{}/{}.h5'.format(
vislab.config['paths']['results'], row['results_name'])
pred_df = pd.read_hdf(pred_df_filename, 'df')
pred_df.index = pred_df.index.astype(str)
pred_dfs.append(pred_df)
# Make sure that all the settings had the same label and split information
arbitrary_pred_df = pred_dfs[0]
    # materialise each comparison: np.all over a bare generator is always truthy
    assert all((df_['label'] == arbitrary_pred_df['label']).all() for df_ in pred_dfs)
    assert all((df_['split'] == arbitrary_pred_df['split']).all() for df_ in pred_dfs)
data = []
for df_ in pred_dfs:
df_["index"] = df_.index
        # TODO: track down why duplicate indices appear here
df_ = df_.drop_duplicates('index')
if 'score' in df_.columns:
data.append(df_['score'])
else:
# TODO: temporary, remove when all experiments are re-run
data.append(df_['pred'])
all_preds_df = pd.DataFrame(data, index=tdf['setting']).T
all_preds_df['label'] = arbitrary_pred_df['label']
all_preds_df['split'] = arbitrary_pred_df['split']
# # Re-order columns
# # columns = all_preds_df.columns.values
# # reordered_columns = ['split', 'label'] + (columns - ['split', 'label']).tolist()
# # all_preds_df = all_preds_df[:, reordered_columns]
all_preds_df.index = all_preds_df.index.astype(str)
return all_preds_df
if __name__ == '__main__':
load_pred_results('wikipaintings_oct25', 'whatever', multiclass=True)
| bsd-2-clause |
nelango/ViralityAnalysis | model/lib/sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
    name_or_id : the integer id or the string name (as recorded in the dataset's
        metadata file) of the MLComp dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
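# Editor's note (illustrative sketch, not part of the original module): with
# MLCOMP_DATASETS_HOME pointing at a folder of unzipped MLComp archives, a
# dataset can be loaded either by numeric id or by its metadata name, e.g.
#
#     news = load_mlcomp('20news-18828', set_='train')
#     filenames, y = news.filenames, news.target
#
# The dataset name here is only an example; any DocumentClassification dataset
# present under the root folder works, and unknown formats raise ValueError.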
| mit |
btabibian/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
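# Editor's note (sketch): partial_dependence accepts either X, from which a
# grid of at most grid_resolution points per target feature is built from the
# training data, or an explicit `grid`, as exercised above.  The returned pdp
# array has one row per output -- a single row for regressors and binary
# classifiers, and one row per class in the multiclass test below -- and one
# column per grid point.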
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
    assert all(ax.has_data() for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
    assert all(ax.has_data() for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
andreshp/Algorithms | Problems/Hackerrank/IndeedPrime/6_mean.py | 1 | 1669 | #!/usr/bin/python
#######################################################################
# Author: Andrés Herrera Poyatos
# Universidad de Granada, June, 2015
# Indeed Prime Challenge
# Problem 6
########################################################################
import numpy
import math
from sklearn import ensemble
#-------------------------------- FUNCTIONS --------------------------------#
def preprocessing(new_line):
instance = [None] * 6
first_split = new_line.split()
first_part_split = first_split[0].split("-")
second_part_split = first_split[2].split("-")
instance[0] = int(first_part_split[0])
instance[1] = int(first_part_split[1])
instance[2] = int(first_part_split[2])
instance[3] = int(second_part_split[0])
instance[4] = int(second_part_split[1])
instance[5] = int(second_part_split[2])
    return instance, (int(first_split[3]) if "_" not in first_split[3] else None)
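# A hedged illustration of preprocessing (assumption about the raw line
# format, which is not shown here: "YYYY-MM-DD <label> YYYY-MM-DD value",
# where the value field contains "_" for rows whose target is unknown):
#
#   preprocessing("2015-06-01 Mon 2015-06-07 42")
#   # -> ([2015, 6, 1, 2015, 6, 7], 42)
#   preprocessing("2015-06-08 Mon 2015-06-14 Missing_1")
#   # -> ([2015, 6, 8, 2015, 6, 14], None)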
ROWS = 595
TEST_ROWS = 50
TRAIN_ROWS = ROWS - TEST_ROWS
###############################################################################
# Load data
x = []; y = []
for i in range(0, ROWS):
instance, value = preprocessing(input())
x.append(instance)
y.append(value)
x_train, y_train = x[:TRAIN_ROWS], y[:TRAIN_ROWS]
x_test, y_test = x[TRAIN_ROWS:], y[TRAIN_ROWS:]
###############################################################################
y = []
for i in range(0, len(x_test)):
count = 0
value = 0
for j in range(300, TRAIN_ROWS):
        # Match the current test instance against the current training
        # instance on field 1 (assumed here to be the grouping key, the
        # month of the start date).
        if x_train[j][1] == x_test[i][1]:
            count += 1
            value += y_train[j]
    # Fall back to the mean of the scanned rows if the key was never seen.
    y.append(value / count if count else sum(y_train[300:]) / len(y_train[300:]))
for p in y:
print(math.trunc(p)) | gpl-2.0 |
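# A hedged alternative sketch (assumption: field 1 of each instance -- the
# month -- is the intended grouping key, as in the loop above): accumulate
# per-key sums once with dictionaries instead of rescanning the training
# rows for every test instance. Defined for illustration only; the loop
# above already printed the submission output.
from collections import defaultdict

def grouped_means(x_train, y_train, x_test, key_index=1, start=300):
    sums = defaultdict(float)
    counts = defaultdict(int)
    for instance, target in zip(x_train[start:], y_train[start:]):
        sums[instance[key_index]] += target
        counts[instance[key_index]] += 1
    # Fall back to the mean of the scanned slice for keys never seen there.
    fallback = sum(y_train[start:]) / len(y_train[start:])
    return [sums[inst[key_index]] / counts[inst[key_index]]
            if counts[inst[key_index]] else fallback
            for inst in x_test]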
bxin/cwfs | tests/testValidate.py | 1 | 4644 | import unittest
import lsst.utils.tests
import os
import pytest
import sys
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError as e:
print("Initialising testValidate.py:", e)
plt = None
from lsst.cwfs.instrument import Instrument
from lsst.cwfs.algorithm import Algorithm
from lsst.cwfs.image import Image, readFile
class MatlabValidationTestCase(lsst.utils.tests.TestCase):
"""Demo test case."""
@classmethod
def setUpClass(cls):
""" setup any state specific to the execution of the given class (which
usually contains tests).
"""
cls.rootdir = os.path.dirname(__file__)
cls.myinst = 'lsst'
cls.validationDir = os.path.join(str(cls.rootdir), 'validation')
cls.tests = [
('testImages/F1.23_1mm_v61', 'z7_0.25_%s.txt', (0, 0), ('fft',), 'paraxial'),
('testImages/LSST_C_SN26', 'z7_0.25_%s.txt', (0, 0), ('fft', 'exp'), 'onAxis'),
('testImages/LSST_NE_SN25', 'z11_0.25_%s.txt', (1.185, 1.185), ('fft', 'exp'), 'offAxis'),
]
# filenames with matlab results and tolerance on absolute discrepancy (in nm)
#
        # N.B. these tolerances are set at 10 nm because the centering
        # algorithm has changed; wavefront differences at the ~10 nm level
        # are well below the noise level.
#
        cls.matlabZFile_Tol = [('F1.23_1mm_v61_z7_0.25_fft.txt', 10),
('LSST_C_SN26_z7_0.25_fft.txt', 10),
('LSST_C_SN26_z7_0.25_exp.txt', 10),
('LSST_NE_SN25_z11_0.25_fft.txt', 10),
('LSST_NE_SN25_z11_0.25_exp.txt', 10),
]
#
# Check that we have the right number of matlab files. Not really a unit test, just consistency
#
cls.nTest = 0
for inDir, filenameFmt, fldxy, algorithms, model in cls.tests:
cls.nTest += len(algorithms)
assert cls.nTest == len(cls.matlabZFile_Tol)
cls.Zernike0 = 4 # first Zernike to fit
znmax = 22 # last Zernike to fit
cls.x = range(cls.Zernike0, znmax + 1)
def testMatlab(self):
global doPlot
if doPlot:
fig = plt.figure(figsize=(10, 10))
j = 0 # counter for matlab outputs, self.matlabZFile_Tol
for imgDir, filenameFmt, fldxy, algorithms, model in self.tests:
imgDir = os.path.join(str(self.rootdir), imgDir)
intraFile = os.path.join(imgDir, filenameFmt % "intra")
I1 = Image(readFile(intraFile), fldxy, Image.INTRA)
extraFile = os.path.join(imgDir, filenameFmt % "extra")
I2 = Image(readFile(extraFile), fldxy, Image.EXTRA)
inst = Instrument(self.myinst, I1.sizeinPix)
for algorithm in algorithms:
matlabZFile, tol = self.matlabZFile_Tol[j]; j += 1
algo = Algorithm(algorithm, inst, 1)
algo.runIt(inst, I1, I2, model)
zer = algo.zer4UpNm
matZ = np.loadtxt(os.path.join(self.validationDir, matlabZFile))
aerr = np.abs(matZ - zer)
print("%-31s max(abs(err)) = %8.3g median(abs(err)) = %8.3g [Z_%d], tol=%.0f nm" %
(matlabZFile, np.max(aerr), np.median(aerr), self.Zernike0 + np.argmax(aerr), tol))
if doPlot:
ax = plt.subplot(self.nTest, 1, j)
plt.plot(self.x, matZ, label='Matlab', marker='o', color='r', markersize=10)
plt.plot(self.x, zer, label='Python', marker='.', color='b', markersize=10)
plt.axvline(self.Zernike0 + np.argmax(aerr), ls=':', color='black')
plt.legend(loc="best", shadow=True, title=matlabZFile, fancybox=True)
ax.get_legend().get_title().set_color("red")
plt.xlim(self.x[0] - 0.5, self.x[-1] + 0.5)
assert np.max(aerr) < tol
if doPlot:
plt.show()
if False: # used for C++, and triggers a file descriptor leak from matplotlib
class MemoryTester(lsst.utils.tests.MemoryTestCase):
pass
def setup_module(module):
lsst.utils.tests.init()
try:
doPlot # should I plot things?
except NameError:
doPlot = False
if __name__ == "__main__":
doPlot = sys.stdout.isatty() # enable matplotlib when run from a terminal
lsst.utils.tests.init()
unittest.main()
| gpl-3.0 |
guschmue/tensorflow | tensorflow/python/estimator/inputs/pandas_io_test.py | 89 | 8340 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_NonBoolShuffle(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaisesRegexp(TypeError,
'shuffle must be explicitly set as boolean'):
# Default shuffle is None
pandas_io.pandas_input_fn(x, y_noindex)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
| apache-2.0 |